mpi/{hello_hybrid/hello_hybrid.cpp → hybrid_distr_arg/hybrid_distr_arg.cpp} RENAMED
@@ -1,31 +1,70 @@
+ #include <algorithm>
+ #include <cstdlib>
  #include <iostream>
  #include <mpi.h>
  #include <omp.h>

+ int calculate_start(int worker_id, int workers, int finish, int begin)
+ {
+   int range = finish - begin;
+   return begin + worker_id * (range / workers) + std::min(worker_id, range % workers);
+ }
+
+ int calculate_finish(int worker_id, int workers, int finish, int begin)
+ {
+   return calculate_start(worker_id + 1, workers, finish, begin);
+ }
+
  int main(int argc, char* argv[])
  {
    MPI_Init(&argc, &argv);

    int my_rank = -1;
    int process_count = -1;

    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &process_count);

    char hostname[MPI_MAX_PROCESSOR_NAME];
    int hostname_length = -1;
    MPI_Get_processor_name(hostname, &hostname_length);

-   std::cout << "Hello from main thread of process " << my_rank
-     << " of " << process_count << " on " << hostname << std::endl;
+   if ( argc == 3 )
+   {
+     const int global_start = atoi(argv[1]);
+     const int global_finish = atoi(argv[2]);
+
+     const int my_start = calculate_start( my_rank, process_count, global_finish, global_start);
+     const int my_finish = calculate_finish( my_rank, process_count, global_finish, global_start);
+     const int my_width = my_finish - my_start;
+
+     // hostname1:0: range [3, 12[ size 9
+     std::cout << hostname << ":" << my_rank << ": range [" << my_start
+       << ", " << my_finish << "[ size " << my_width << std::endl;

    #pragma omp parallel default(none) shared(my_rank, hostname, std::cout)
    {
+     int my_thread_start = 0;
+     int my_thread_finish = 0;
+
+     #pragma omp for
+     for ( int index = my_start; index < my_finish; ++index )
+     {
+       if ( my_thread_start == 0 )
+         my_thread_start = index;
+       my_thread_finish = index;
+     }
+
+     const int my_thread_width = ++my_thread_finish - my_thread_start;
+
+     // hostname1:0.0: range [3,6[ size 3
      #pragma omp critical(stdout)
-     std::cout << "\tHello from thread " << omp_get_thread_num()
-       << " of " << omp_get_num_threads() << " of process "
-       << my_rank << " on " << hostname << std::endl;
+     std::cout << '\t' << hostname << ":" << my_rank << ":" << omp_get_thread_num() << ": range ["
+       << my_thread_start << "," << my_thread_finish << "[ size " << my_thread_width << std::endl;
+   }
    }
+   else
+     std::cerr << "usage: hybrid_distr_arg min max" << std::endl;

    MPI_Finalize();
  }
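
For reference, the new calculate_start()/calculate_finish() helpers implement a block decomposition of the half-open range [begin, finish[: each worker gets range / workers consecutive values (integer division), and the first range % workers workers get one extra, so sub-range sizes differ by at most one and the sub-ranges tile the whole interval. The sketch below is not part of the repository; it only reproduces the two helpers and prints the sub-range each process would receive for sample arguments (begin = 3, finish = 20, 4 processes).

#include <algorithm>
#include <iostream>

int calculate_start(int worker_id, int workers, int finish, int begin)
{
  int range = finish - begin;
  return begin + worker_id * (range / workers) + std::min(worker_id, range % workers);
}

int calculate_finish(int worker_id, int workers, int finish, int begin)
{
  return calculate_start(worker_id + 1, workers, finish, begin);
}

int main()
{
  const int begin = 3, finish = 20, workers = 4;  // sample values for illustration only
  for (int worker_id = 0; worker_id < workers; ++worker_id)
  {
    const int start = calculate_start(worker_id, workers, finish, begin);
    const int stop = calculate_finish(worker_id, workers, finish, begin);
    // prints worker 0: [3, 8[ size 5, then [8, 12[, [12, 16[, [16, 20[
    std::cout << "worker " << worker_id << ": [" << start << ", " << stop
        << "[ size " << (stop - start) << std::endl;
  }
}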
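
With an MPI toolchain that provides compiler wrappers and a launcher (names vary by distribution), the program can typically be built with something like mpic++ -fopenmp hybrid_distr_arg.cpp -o hybrid_distr_arg and run with mpiexec -n 2 ./hybrid_distr_arg 3 20. Each process then prints its [my_start, my_finish[ sub-range, and each of its OpenMP threads prints the slice that #pragma omp for assigned to it, in the hostname:rank:thread format shown in the source comments; the exact output order varies between runs because threads print concurrently, serialized only by the critical section. Note that the per-thread slice is recovered by observing the loop indices, so the my_thread_start == 0 sentinel assumes the global range does not start at 0 and that every thread receives at least one iteration.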