mpi/{stdin_sendrecv/src/stdin_sendrecv.cpp → stdin_bcast/src/stdin_bcast.cpp} RENAMED
@@ -1,91 +1,79 @@
1
  // Copyright 2021 Jeisson Hidalgo <jeisson.hidalgo@ucr.ac.cr> CC-BY 4.0
2
 
3
  #include <mpi.h>
4
  #include <cstdint>
5
  #include <cstdlib>
6
  #include <iostream>
7
  #include <sstream>
8
  #include <stdexcept>
9
  #include <vector>
10
 
 
11
// Reports a fatal distributed error by throwing; caught in main().
#define fail(msg) throw std::runtime_error(msg)

// Rank 0 reads double values from stdin and sends them to every other
// process; the other ranks receive them. All ranks print their values.
void process_values(int process_number, int process_count
  , const char* process_hostname);
 
16
  int main(int argc, char* argv[]) {
17
  int error = EXIT_SUCCESS;
18
  if (MPI_Init(&argc, &argv) == MPI_SUCCESS) {
19
  int process_number = -1;
20
  MPI_Comm_rank(MPI_COMM_WORLD, &process_number);
21
 
22
  int process_count = -1;
23
  MPI_Comm_size(MPI_COMM_WORLD, &process_count);
24
 
25
  char process_hostname[MPI_MAX_PROCESSOR_NAME] = { '\0' };
26
  int hostname_length = -1;
27
  MPI_Get_processor_name(process_hostname, &hostname_length);
28
 
29
  try {
30
  const double start_time = MPI_Wtime();
31
- process_values(process_number, process_count, process_hostname);
32
  const double elapsed = MPI_Wtime() - start_time;
33
  std::cout << process_hostname << ":" << process_number
34
  << ".m: elapsed time " << elapsed << "s" << std::endl;
35
  } catch (const std::runtime_error& exception) {
36
  std::cerr << "error: " << exception.what() << std::endl;
37
  error = EXIT_FAILURE;
38
  }
39
 
40
  MPI_Finalize();
41
  } else {
42
  std::cerr << "error: could not init MPI" << std::endl;
43
  error = EXIT_FAILURE;
44
  }
45
  return error;
46
  }
47
 
48
- void process_values(int process_number, int process_count
49
- , const char* process_hostname) {
50
  std::vector<double> values;
51
  size_t value_count = 0;
52
 
53
  if (process_number == 0) {
54
  double value = 0.0;
55
  while (std::cin >> value) {
56
  values.push_back(value);
57
  }
58
 
59
  value_count = values.size();
 
60
 
61
- for (int target = 1; target < process_count; ++target) {
62
  static_assert(sizeof(value_count) == sizeof(uint64_t)
63
  , "update MPI_Send data type to match your architecture");
64
- if (MPI_Send(&value_count, /*count*/ 1, MPI_UINT64_T, target
65
- , /*tag*/ 0, MPI_COMM_WORLD) != MPI_SUCCESS) {
66
- fail("could not send value count");
67
- }
68
- if (MPI_Send(&values[0], value_count, MPI_DOUBLE, target
69
- , /*tag*/ 0, MPI_COMM_WORLD) != MPI_SUCCESS) {
70
- fail("could not send values");
71
- }
72
- }
73
- } else {
74
- if (MPI_Recv(&value_count, /*capacity*/ 1, MPI_UINT64_T, /*source*/ 0
75
- , /*tag*/ 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE) != MPI_SUCCESS ) {
76
- fail("could not receive value count");
77
  }
78
 
79
  values.resize(value_count);
80
 
81
- if (MPI_Recv(&values[0], /*capacity*/ value_count, MPI_DOUBLE, /*source*/ 0
82
- , /*tag*/ 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE) != MPI_SUCCESS ) {
83
- fail("could not receive values");
84
- }
85
  }
86
 
87
  for (size_t index = 0; index < values.size(); ++index) {
88
  std::cout << process_hostname << ":" << process_number << ".m: values["
89
  << index << "] == " << values[index] << std::endl;
90
  }
91
  }
1
  // Copyright 2021 Jeisson Hidalgo <jeisson.hidalgo@ucr.ac.cr> CC-BY 4.0
2
 
3
  #include <mpi.h>
4
  #include <cstdint>
5
  #include <cstdlib>
6
  #include <iostream>
7
  #include <sstream>
8
  #include <stdexcept>
9
  #include <vector>
10
 
11
// TODO: DistributedException(process_number, exception_code, msg)
// Reports a fatal distributed error by throwing; caught in main().
#define fail(msg) throw std::runtime_error(msg)

// Rank 0 reads double values from stdin; the count and values are then
// broadcast to every process. All ranks print the values they hold.
void process_values(int process_number, const char* process_hostname);
 
15
 
16
  int main(int argc, char* argv[]) {
17
  int error = EXIT_SUCCESS;
18
  if (MPI_Init(&argc, &argv) == MPI_SUCCESS) {
19
  int process_number = -1;
20
  MPI_Comm_rank(MPI_COMM_WORLD, &process_number);
21
 
22
  int process_count = -1;
23
  MPI_Comm_size(MPI_COMM_WORLD, &process_count);
24
 
25
  char process_hostname[MPI_MAX_PROCESSOR_NAME] = { '\0' };
26
  int hostname_length = -1;
27
  MPI_Get_processor_name(process_hostname, &hostname_length);
28
 
29
  try {
30
  const double start_time = MPI_Wtime();
31
+ process_values(process_number, process_hostname);
32
  const double elapsed = MPI_Wtime() - start_time;
33
  std::cout << process_hostname << ":" << process_number
34
  << ".m: elapsed time " << elapsed << "s" << std::endl;
35
  } catch (const std::runtime_error& exception) {
36
  std::cerr << "error: " << exception.what() << std::endl;
37
  error = EXIT_FAILURE;
38
  }
39
 
40
  MPI_Finalize();
41
  } else {
42
  std::cerr << "error: could not init MPI" << std::endl;
43
  error = EXIT_FAILURE;
44
  }
45
  return error;
46
  }
47
 
48
+ void process_values(int process_number, const char* process_hostname) {
 
49
  std::vector<double> values;
50
  size_t value_count = 0;
51
 
52
  if (process_number == 0) {
53
  double value = 0.0;
54
  while (std::cin >> value) {
55
  values.push_back(value);
56
  }
57
 
58
  value_count = values.size();
59
+ }
60
 
 
61
  static_assert(sizeof(value_count) == sizeof(uint64_t)
62
  , "update MPI_Send data type to match your architecture");
63
+ if (MPI_Bcast(&value_count, /*count*/ 1, MPI_UINT64_T, /*root*/ 0
64
+ , MPI_COMM_WORLD) != MPI_SUCCESS ) {
65
+ fail("could not broadcast value count");
 
 
 
 
 
 
 
 
 
 
66
  }
67
 
68
  values.resize(value_count);
69
 
70
+ if (MPI_Bcast(&values[0], value_count, MPI_DOUBLE, /*root*/ 0
71
+ , MPI_COMM_WORLD) != MPI_SUCCESS ) {
72
+ fail("could not broadcast values");
 
73
  }
74
 
75
  for (size_t index = 0; index < values.size(); ++index) {
76
  std::cout << process_hostname << ":" << process_number << ".m: values["
77
  << index << "] == " << values[index] << std::endl;
78
  }
79
  }