LLVM OpenMP* Runtime Library
kmp_dispatch_hier.h
/*
 * kmp_dispatch_hier.h -- hierarchical scheduling methods and data structures
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef KMP_DISPATCH_HIER_H
#define KMP_DISPATCH_HIER_H
#include "kmp.h"
#include "kmp_dispatch.h"

// Layer type for scheduling hierarchy
enum kmp_hier_layer_e {
  LAYER_THREAD = -1,
  LAYER_L1,
  LAYER_L2,
  LAYER_L3,
  LAYER_NUMA,
  LAYER_LOOP,
  LAYER_LAST
};

// Convert hierarchy type (LAYER_L1, LAYER_L2, etc.) to C-style string
static inline const char *__kmp_get_hier_str(kmp_hier_layer_e type) {
  switch (type) {
  case kmp_hier_layer_e::LAYER_THREAD:
    return "THREAD";
  case kmp_hier_layer_e::LAYER_L1:
    return "L1";
  case kmp_hier_layer_e::LAYER_L2:
    return "L2";
  case kmp_hier_layer_e::LAYER_L3:
    return "L3";
  case kmp_hier_layer_e::LAYER_NUMA:
    return "NUMA";
  case kmp_hier_layer_e::LAYER_LOOP:
    return "WHOLE_LOOP";
  case kmp_hier_layer_e::LAYER_LAST:
    return "LAST";
  }
  KMP_ASSERT(0);
  // Appease compilers, should never get here
  return "ERROR";
}

// Structure to store values parsed from OMP_SCHEDULE for scheduling hierarchy
typedef struct kmp_hier_sched_env_t {
  int size;
  int capacity;
  enum sched_type *scheds;
  kmp_int32 *small_chunks;
  kmp_int64 *large_chunks;
  kmp_hier_layer_e *layers;
  // Append a level of the hierarchy
  void append(enum sched_type sched, kmp_int32 chunk, kmp_hier_layer_e layer) {
    if (capacity == 0) {
      scheds = (enum sched_type *)__kmp_allocate(sizeof(enum sched_type) *
                                                 kmp_hier_layer_e::LAYER_LAST);
      small_chunks = (kmp_int32 *)__kmp_allocate(sizeof(kmp_int32) *
                                                 kmp_hier_layer_e::LAYER_LAST);
      large_chunks = (kmp_int64 *)__kmp_allocate(sizeof(kmp_int64) *
                                                 kmp_hier_layer_e::LAYER_LAST);
      layers = (kmp_hier_layer_e *)__kmp_allocate(sizeof(kmp_hier_layer_e) *
                                                  kmp_hier_layer_e::LAYER_LAST);
      capacity = kmp_hier_layer_e::LAYER_LAST;
    }
    int current_size = size;
    KMP_DEBUG_ASSERT(current_size < kmp_hier_layer_e::LAYER_LAST);
    scheds[current_size] = sched;
    layers[current_size] = layer;
    small_chunks[current_size] = chunk;
    large_chunks[current_size] = (kmp_int64)chunk;
    size++;
  }
  // Sort the hierarchy using selection sort; size will always be small
  // (less than LAYER_LAST), so an O(n log n) algorithm is not necessary.
  void sort() {
    if (size <= 1)
      return;
    for (int i = 0; i < size; ++i) {
      int switch_index = i;
      for (int j = i + 1; j < size; ++j) {
        if (layers[j] < layers[switch_index])
          switch_index = j;
      }
      if (switch_index != i) {
        kmp_hier_layer_e temp1 = layers[i];
        enum sched_type temp2 = scheds[i];
        kmp_int32 temp3 = small_chunks[i];
        kmp_int64 temp4 = large_chunks[i];
        layers[i] = layers[switch_index];
        scheds[i] = scheds[switch_index];
        small_chunks[i] = small_chunks[switch_index];
        large_chunks[i] = large_chunks[switch_index];
        layers[switch_index] = temp1;
        scheds[switch_index] = temp2;
        small_chunks[switch_index] = temp3;
        large_chunks[switch_index] = temp4;
      }
    }
  }
  // Free all memory
  void deallocate() {
    if (capacity > 0) {
      __kmp_free(scheds);
      __kmp_free(layers);
      __kmp_free(small_chunks);
      __kmp_free(large_chunks);
      scheds = NULL;
      layers = NULL;
      small_chunks = NULL;
      large_chunks = NULL;
    }
    size = 0;
    capacity = 0;
  }
} kmp_hier_sched_env_t;
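// Illustrative sketch (assumed usage, not part of the runtime): how a parser
// of OMP_SCHEDULE could drive this structure. The concrete schedules, chunk
// sizes, and layers below are made-up examples.
//   kmp_hier_sched_env_t env = {/*size*/ 0, /*capacity*/ 0, NULL, NULL, NULL,
//                               NULL};
//   env.append(kmp_sch_dynamic_chunked, 8, kmp_hier_layer_e::LAYER_L2);
//   env.append(kmp_sch_static, 1, kmp_hier_layer_e::LAYER_L1);
//   env.sort(); // orders entries finest-to-coarsest: L1 before L2
//   // ... hand env.layers / env.scheds / env.small_chunks to the dispatcher
//   env.deallocate();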

extern int __kmp_dispatch_hand_threading;
extern kmp_hier_sched_env_t __kmp_hier_scheds;

// Sizes of layer arrays bounded by max number of detected L1s, L2s, etc.
extern int __kmp_hier_max_units[kmp_hier_layer_e::LAYER_LAST + 1];
extern int __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_LAST + 1];

extern int __kmp_dispatch_get_index(int tid, kmp_hier_layer_e type);
extern int __kmp_dispatch_get_id(int gtid, kmp_hier_layer_e type);
extern int __kmp_dispatch_get_t1_per_t2(kmp_hier_layer_e t1,
                                        kmp_hier_layer_e t2);
extern void __kmp_dispatch_free_hierarchies(kmp_team_t *team);
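// Illustrative note (assumed topology, not part of the runtime): on a machine
// with 2 hardware threads per core and 4 cores sharing an L2,
//   __kmp_dispatch_get_t1_per_t2(kmp_hier_layer_e::LAYER_THREAD,
//                                kmp_hier_layer_e::LAYER_L1);  // would be 2
//   __kmp_dispatch_get_t1_per_t2(kmp_hier_layer_e::LAYER_L1,
//                                kmp_hier_layer_e::LAYER_L2);  // would be 4
// i.e. it reports how many units of layer t1 fit inside one unit of layer t2.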

template <typename T> struct kmp_hier_shared_bdata_t {
  typedef typename traits_t<T>::signed_t ST;
  volatile kmp_uint64 val[2];
  kmp_int32 status[2];
  T lb[2];
  T ub[2];
  ST st[2];
  dispatch_shared_info_template<T> sh[2];
  void zero() {
    val[0] = val[1] = 0;
    status[0] = status[1] = 0;
    lb[0] = lb[1] = 0;
    ub[0] = ub[1] = 0;
    st[0] = st[1] = 0;
    sh[0].u.s.iteration = sh[1].u.s.iteration = 0;
  }
  void set_next_hand_thread(T nlb, T nub, ST nst, kmp_int32 nstatus,
                            kmp_uint64 index) {
    lb[1 - index] = nlb;
    ub[1 - index] = nub;
    st[1 - index] = nst;
    status[1 - index] = nstatus;
  }
  void set_next(T nlb, T nub, ST nst, kmp_int32 nstatus, kmp_uint64 index) {
    lb[1 - index] = nlb;
    ub[1 - index] = nub;
    st[1 - index] = nst;
    status[1 - index] = nstatus;
    sh[1 - index].u.s.iteration = 0;
  }

  kmp_int32 get_next_status(kmp_uint64 index) const {
    return status[1 - index];
  }
  T get_next_lb(kmp_uint64 index) const { return lb[1 - index]; }
  T get_next_ub(kmp_uint64 index) const { return ub[1 - index]; }
  ST get_next_st(kmp_uint64 index) const { return st[1 - index]; }
  dispatch_shared_info_template<T> volatile *get_next_sh(kmp_uint64 index) {
    return &(sh[1 - index]);
  }

  kmp_int32 get_curr_status(kmp_uint64 index) const { return status[index]; }
  T get_curr_lb(kmp_uint64 index) const { return lb[index]; }
  T get_curr_ub(kmp_uint64 index) const { return ub[index]; }
  ST get_curr_st(kmp_uint64 index) const { return st[index]; }
  dispatch_shared_info_template<T> volatile *get_curr_sh(kmp_uint64 index) {
    return &(sh[index]);
  }
};

/*
 * In the barrier implementations, num_active is the number of threads that are
 * attached to the kmp_hier_top_unit_t structure in the scheduling hierarchy.
 * bdata is the shared barrier data that resides on the kmp_hier_top_unit_t
 * structure. tdata is the thread-private data that resides on the thread
 * data structure.
 *
 * The reset_shared() method is used to initialize the barrier data on the
 * kmp_hier_top_unit_t hierarchy structure.
 *
 * The reset_private() method is used to initialize the barrier data on the
 * thread's private dispatch buffer structure.
 *
 * The barrier() method takes an id, which is that thread's id for the
 * kmp_hier_top_unit_t structure, and implements the barrier. All threads wait
 * inside barrier() until all fellow threads who are attached to that
 * kmp_hier_top_unit_t structure have arrived.
 */
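// Illustrative sketch (assumed calling pattern, not part of the runtime):
//   // one thread of the unit resets the shared side of the barrier ...
//   core_barrier_impl<kmp_int32>::reset_shared(num_active,
//                                              &unit->hier_barrier);
//   // ... every attached thread resets its private side ...
//   core_barrier_impl<kmp_int32>::reset_private(num_active, tdata);
//   // ... and the attached threads then meet here (my_unit_id is a
//   // hypothetical per-unit thread id):
//   core_barrier_impl<kmp_int32>::barrier(my_unit_id, &unit->hier_barrier,
//                                         tdata);
// kmp_hier_top_unit_t below wraps this pattern and picks between the core and
// counter implementations based on the unit's active count.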

// Core barrier implementation
// Can be used in a unit with between 2 and 8 threads
template <typename T> class core_barrier_impl {
  static inline kmp_uint64 get_wait_val(int num_active) {
    kmp_uint64 wait_val;
    switch (num_active) {
    case 2:
      wait_val = 0x0101LL;
      break;
    case 3:
      wait_val = 0x010101LL;
      break;
    case 4:
      wait_val = 0x01010101LL;
      break;
    case 5:
      wait_val = 0x0101010101LL;
      break;
    case 6:
      wait_val = 0x010101010101LL;
      break;
    case 7:
      wait_val = 0x01010101010101LL;
      break;
    case 8:
      wait_val = 0x0101010101010101LL;
      break;
    default:
      // don't use the core_barrier_impl for more than 8 threads
      KMP_ASSERT(0);
    }
    return wait_val;
  }

public:
  static void reset_private(kmp_int32 num_active,
                            kmp_hier_private_bdata_t *tdata);
  static void reset_shared(kmp_int32 num_active,
                           kmp_hier_shared_bdata_t<T> *bdata);
  static void barrier(kmp_int32 id, kmp_hier_shared_bdata_t<T> *bdata,
                      kmp_hier_private_bdata_t *tdata);
};

template <typename T>
void core_barrier_impl<T>::reset_private(kmp_int32 num_active,
                                         kmp_hier_private_bdata_t *tdata) {
  tdata->num_active = num_active;
  tdata->index = 0;
  tdata->wait_val[0] = tdata->wait_val[1] = get_wait_val(num_active);
}
template <typename T>
void core_barrier_impl<T>::reset_shared(kmp_int32 num_active,
                                        kmp_hier_shared_bdata_t<T> *bdata) {
  bdata->val[0] = bdata->val[1] = 0LL;
  bdata->status[0] = bdata->status[1] = 0LL;
}
template <typename T>
void core_barrier_impl<T>::barrier(kmp_int32 id,
                                   kmp_hier_shared_bdata_t<T> *bdata,
                                   kmp_hier_private_bdata_t *tdata) {
  kmp_uint64 current_index = tdata->index;
  kmp_uint64 next_index = 1 - current_index;
  kmp_uint64 current_wait_value = tdata->wait_val[current_index];
  kmp_uint64 next_wait_value =
      (current_wait_value ? 0 : get_wait_val(tdata->num_active));
  KD_TRACE(10, ("core_barrier_impl::barrier(): T#%d current_index:%llu "
                "next_index:%llu curr_wait:%llu next_wait:%llu\n",
                __kmp_get_gtid(), current_index, next_index, current_wait_value,
                next_wait_value));
  char v = (current_wait_value ? 0x1 : 0x0);
  (RCAST(volatile char *, &(bdata->val[current_index])))[id] = v;
  __kmp_wait<kmp_uint64>(&(bdata->val[current_index]), current_wait_value,
                         __kmp_eq<kmp_uint64> USE_ITT_BUILD_ARG(NULL));
  tdata->wait_val[current_index] = next_wait_value;
  tdata->index = next_index;
}
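// Illustrative note (not part of the runtime): the core barrier treats the
// 64-bit val word as an array of up to 8 one-byte arrival flags, one per
// thread id. With num_active == 4, for example, each arriving thread stores
// 0x01 into its own byte and then spins until the whole word equals the
// get_wait_val(4) pattern 0x01010101 (byte layout shown for a little-endian
// target):
//   thread 0 arrives     -> val == 0x00000001
//   thread 2 arrives     -> val == 0x00010001
//   threads 1, 3 arrive  -> val == 0x01010101  (everyone is released)
// On the next use of the same buffer the threads store 0x00 instead and wait
// for the word to return to zero, which is why next_wait_value alternates
// between zero and the full pattern.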

// Counter barrier implementation
// Can be used in a unit with an arbitrary number of active threads
template <typename T> class counter_barrier_impl {
public:
  static void reset_private(kmp_int32 num_active,
                            kmp_hier_private_bdata_t *tdata);
  static void reset_shared(kmp_int32 num_active,
                           kmp_hier_shared_bdata_t<T> *bdata);
  static void barrier(kmp_int32 id, kmp_hier_shared_bdata_t<T> *bdata,
                      kmp_hier_private_bdata_t *tdata);
};

template <typename T>
void counter_barrier_impl<T>::reset_private(kmp_int32 num_active,
                                            kmp_hier_private_bdata_t *tdata) {
  tdata->num_active = num_active;
  tdata->index = 0;
  tdata->wait_val[0] = tdata->wait_val[1] = (kmp_uint64)num_active;
}
template <typename T>
void counter_barrier_impl<T>::reset_shared(kmp_int32 num_active,
                                           kmp_hier_shared_bdata_t<T> *bdata) {
  bdata->val[0] = bdata->val[1] = 0LL;
  bdata->status[0] = bdata->status[1] = 0LL;
}
template <typename T>
void counter_barrier_impl<T>::barrier(kmp_int32 id,
                                      kmp_hier_shared_bdata_t<T> *bdata,
                                      kmp_hier_private_bdata_t *tdata) {
  volatile kmp_int64 *val;
  kmp_uint64 current_index = tdata->index;
  kmp_uint64 next_index = 1 - current_index;
  kmp_uint64 current_wait_value = tdata->wait_val[current_index];
  kmp_uint64 next_wait_value = current_wait_value + tdata->num_active;

  KD_TRACE(10, ("counter_barrier_impl::barrier(): T#%d current_index:%llu "
                "next_index:%llu curr_wait:%llu next_wait:%llu\n",
                __kmp_get_gtid(), current_index, next_index, current_wait_value,
                next_wait_value));
  val = RCAST(volatile kmp_int64 *, &(bdata->val[current_index]));
  KMP_TEST_THEN_INC64(val);
  __kmp_wait<kmp_uint64>(&(bdata->val[current_index]), current_wait_value,
                         __kmp_ge<kmp_uint64> USE_ITT_BUILD_ARG(NULL));
  tdata->wait_val[current_index] = next_wait_value;
  tdata->index = next_index;
}
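// Illustrative note (not part of the runtime): the counter barrier never
// resets the shared counter; instead each thread remembers the value it has
// to wait for. With num_active == 3 the per-buffer wait value advances as
//   1st use of a buffer: threads increment val from 0 to 3, wait for val >= 3
//   2nd use of a buffer: threads increment val from 3 to 6, wait for val >= 6
// and so on, which is why next_wait_value is current_wait_value + num_active.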

// Data associated with topology unit within a layer
// For example, one kmp_hier_top_unit_t corresponds to one L1 cache
template <typename T> struct kmp_hier_top_unit_t {
  typedef typename traits_t<T>::signed_t ST;
  typedef typename traits_t<T>::unsigned_t UT;
  kmp_int32 active; // number of topology units that communicate with this unit
  // chunk information (lower/upper bound, stride, etc.)
  dispatch_private_info_template<T> hier_pr;
  kmp_hier_top_unit_t<T> *hier_parent; // pointer to parent unit
  kmp_hier_shared_bdata_t<T> hier_barrier; // shared barrier data for this unit

  kmp_int32 get_hier_id() const { return hier_pr.hier_id; }
  void reset_shared_barrier() {
    KMP_DEBUG_ASSERT(active > 0);
    if (active == 1)
      return;
    hier_barrier.zero();
    if (active >= 2 && active <= 8) {
      core_barrier_impl<T>::reset_shared(active, &hier_barrier);
    } else {
      counter_barrier_impl<T>::reset_shared(active, &hier_barrier);
    }
  }
  void reset_private_barrier(kmp_hier_private_bdata_t *tdata) {
    KMP_DEBUG_ASSERT(tdata);
    KMP_DEBUG_ASSERT(active > 0);
    if (active == 1)
      return;
    if (active >= 2 && active <= 8) {
      core_barrier_impl<T>::reset_private(active, tdata);
    } else {
      counter_barrier_impl<T>::reset_private(active, tdata);
    }
  }
  void barrier(kmp_int32 id, kmp_hier_private_bdata_t *tdata) {
    KMP_DEBUG_ASSERT(tdata);
    KMP_DEBUG_ASSERT(active > 0);
    KMP_DEBUG_ASSERT(id >= 0 && id < active);
    if (active == 1) {
      tdata->index = 1 - tdata->index;
      return;
    }
    if (active >= 2 && active <= 8) {
      core_barrier_impl<T>::barrier(id, &hier_barrier, tdata);
    } else {
      counter_barrier_impl<T>::barrier(id, &hier_barrier, tdata);
    }
  }

  kmp_int32 get_next_status(kmp_uint64 index) const {
    return hier_barrier.get_next_status(index);
  }
  T get_next_lb(kmp_uint64 index) const {
    return hier_barrier.get_next_lb(index);
  }
  T get_next_ub(kmp_uint64 index) const {
    return hier_barrier.get_next_ub(index);
  }
  ST get_next_st(kmp_uint64 index) const {
    return hier_barrier.get_next_st(index);
  }
  dispatch_shared_info_template<T> volatile *get_next_sh(kmp_uint64 index) {
    return hier_barrier.get_next_sh(index);
  }

  kmp_int32 get_curr_status(kmp_uint64 index) const {
    return hier_barrier.get_curr_status(index);
  }
  T get_curr_lb(kmp_uint64 index) const {
    return hier_barrier.get_curr_lb(index);
  }
  T get_curr_ub(kmp_uint64 index) const {
    return hier_barrier.get_curr_ub(index);
  }
  ST get_curr_st(kmp_uint64 index) const {
    return hier_barrier.get_curr_st(index);
  }
  dispatch_shared_info_template<T> volatile *get_curr_sh(kmp_uint64 index) {
    return hier_barrier.get_curr_sh(index);
  }

  void set_next_hand_thread(T lb, T ub, ST st, kmp_int32 status,
                            kmp_uint64 index) {
    hier_barrier.set_next_hand_thread(lb, ub, st, status, index);
  }
  void set_next(T lb, T ub, ST st, kmp_int32 status, kmp_uint64 index) {
    hier_barrier.set_next(lb, ub, st, status, index);
  }
  dispatch_private_info_template<T> *get_my_pr() { return &hier_pr; }
  kmp_hier_top_unit_t<T> *get_parent() { return hier_parent; }
  dispatch_private_info_template<T> *get_parent_pr() {
    return &(hier_parent->hier_pr);
  }

  kmp_int32 is_active() const { return active; }
  kmp_int32 get_num_active() const { return active; }
  void print() {
    KD_TRACE(
        10,
        (" kmp_hier_top_unit_t: active:%d pr:%p lb:%d ub:%d st:%d tc:%d\n",
         active, &hier_pr, hier_pr.u.p.lb, hier_pr.u.p.ub, hier_pr.u.p.st,
         hier_pr.u.p.tc));
  }
};

// Information regarding a single layer within the scheduling hierarchy
template <typename T> struct kmp_hier_layer_info_t {
  int num_active; // number of threads active in this level
  kmp_hier_layer_e type; // LAYER_L1, LAYER_L2, etc.
  enum sched_type sched; // static, dynamic, guided, etc.
  typename traits_t<T>::signed_t chunk; // chunk size associated with schedule
  int length; // length of the kmp_hier_top_unit_t array

  // Print this layer's information
  void print() {
    const char *t = __kmp_get_hier_str(type);
    KD_TRACE(
        10,
        (" kmp_hier_layer_info_t: num_active:%d type:%s sched:%d chunk:%d "
         "length:%d\n",
         num_active, t, sched, chunk, length));
  }
};

/*
 * Structure to implement entire hierarchy
 *
 * The hierarchy is kept as an array of arrays to represent the different
 * layers. Layer 0 is the lowest layer and layer num_layers - 1 is the
 * highest layer.
 * Example:
 * [ 2 ] -> [ L3 | L3 ]
 * [ 1 ] -> [ L2 | L2 | L2 | L2 ]
 * [ 0 ] -> [ L1 | L1 | L1 | L1 | L1 | L1 | L1 | L1 ]
 * There is also an array of layer_info_t which has information regarding
 * each layer.
 */
template <typename T> struct kmp_hier_t {
public:
  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;

private:
  int next_recurse(ident_t *loc, int gtid, kmp_hier_top_unit_t<T> *current,
                   kmp_int32 *p_last, T *p_lb, T *p_ub, ST *p_st,
                   kmp_int32 previous_id, int hier_level) {
    int status;
    kmp_info_t *th = __kmp_threads[gtid];
    auto parent = current->get_parent();
    bool last_layer = (hier_level == get_num_layers() - 1);
    KMP_DEBUG_ASSERT(th);
    kmp_hier_private_bdata_t *tdata = &(th->th.th_hier_bar_data[hier_level]);
    KMP_DEBUG_ASSERT(current);
    KMP_DEBUG_ASSERT(hier_level >= 0);
    KMP_DEBUG_ASSERT(hier_level < get_num_layers());
    KMP_DEBUG_ASSERT(tdata);
    KMP_DEBUG_ASSERT(parent || last_layer);

    KD_TRACE(
        1, ("kmp_hier_t.next_recurse(): T#%d (%d) called\n", gtid, hier_level));

    T hier_id = (T)current->get_hier_id();
    // Attempt to grab next iteration range for this level
    if (previous_id == 0) {
      KD_TRACE(1, ("kmp_hier_t.next_recurse(): T#%d (%d) is master of unit\n",
                   gtid, hier_level));
      kmp_int32 contains_last;
      T my_lb, my_ub;
      ST my_st;
      T nproc;
      dispatch_shared_info_template<T> volatile *my_sh;
      dispatch_private_info_template<T> *my_pr;
      if (last_layer) {
        // last layer below the very top uses the single shared buffer
        // from the team struct.
        KD_TRACE(10,
                 ("kmp_hier_t.next_recurse(): T#%d (%d) using top level sh\n",
                  gtid, hier_level));
        my_sh = reinterpret_cast<dispatch_shared_info_template<T> volatile *>(
            th->th.th_dispatch->th_dispatch_sh_current);
        nproc = (T)get_top_level_nproc();
      } else {
        // middle layers use the shared buffer inside the kmp_hier_top_unit_t
        // structure
        KD_TRACE(10, ("kmp_hier_t.next_recurse(): T#%d (%d) using hier sh\n",
                      gtid, hier_level));
        my_sh =
            parent->get_curr_sh(th->th.th_hier_bar_data[hier_level + 1].index);
        nproc = (T)parent->get_num_active();
      }
      my_pr = current->get_my_pr();
      KMP_DEBUG_ASSERT(my_sh);
      KMP_DEBUG_ASSERT(my_pr);
      enum sched_type schedule = get_sched(hier_level);
      ST chunk = (ST)get_chunk(hier_level);
      status = __kmp_dispatch_next_algorithm<T>(gtid, my_pr, my_sh,
                                                &contains_last, &my_lb, &my_ub,
                                                &my_st, nproc, hier_id);
      KD_TRACE(
          10,
          ("kmp_hier_t.next_recurse(): T#%d (%d) next_pr_sh() returned %d\n",
           gtid, hier_level, status));
      // When no iterations are found (status == 0) and this is not the last
      // layer, attempt to go up the hierarchy for more iterations
      if (status == 0 && !last_layer) {
        status = next_recurse(loc, gtid, parent, &contains_last, &my_lb, &my_ub,
                              &my_st, hier_id, hier_level + 1);
        KD_TRACE(
            10,
            ("kmp_hier_t.next_recurse(): T#%d (%d) hier_next() returned %d\n",
             gtid, hier_level, status));
        if (status == 1) {
          kmp_hier_private_bdata_t *upper_tdata =
              &(th->th.th_hier_bar_data[hier_level + 1]);
          my_sh = parent->get_curr_sh(upper_tdata->index);
          KD_TRACE(10, ("kmp_hier_t.next_recurse(): T#%d (%d) about to init\n",
                        gtid, hier_level));
          __kmp_dispatch_init_algorithm(loc, gtid, my_pr, schedule,
                                        parent->get_curr_lb(upper_tdata->index),
                                        parent->get_curr_ub(upper_tdata->index),
                                        parent->get_curr_st(upper_tdata->index),
#if USE_ITT_BUILD
                                        NULL,
#endif
                                        chunk, nproc, hier_id);
          status = __kmp_dispatch_next_algorithm<T>(
              gtid, my_pr, my_sh, &contains_last, &my_lb, &my_ub, &my_st, nproc,
              hier_id);
          if (!status) {
            KD_TRACE(10, ("kmp_hier_t.next_recurse(): T#%d (%d) status not 1 "
                          "setting to 2!\n",
                          gtid, hier_level));
            status = 2;
          }
        }
      }
      current->set_next(my_lb, my_ub, my_st, status, tdata->index);
      // Propagate whether a unit holds the actual global last iteration
      // The contains_last attribute is sent downwards from the top to the
      // bottom of the hierarchy via the contains_last flag inside the
      // private dispatch buffers in the hierarchy's middle layers
      if (contains_last) {
        // If the next_algorithm() method returns 1 for p_last and it is the
        // last layer or our parent contains the last serial chunk, then the
        // chunk must contain the last serial iteration.
        if (last_layer || parent->hier_pr.flags.contains_last) {
          KD_TRACE(10, ("kmp_hier_t.next_recurse(): T#%d (%d) Setting this pr "
                        "to contain last.\n",
                        gtid, hier_level));
          current->hier_pr.flags.contains_last = contains_last;
        }
        if (!current->hier_pr.flags.contains_last)
          contains_last = FALSE;
      }
      if (p_last)
        *p_last = contains_last;
    } // if master thread of this unit
    if (hier_level > 0 || !__kmp_dispatch_hand_threading) {
      KD_TRACE(10,
               ("kmp_hier_t.next_recurse(): T#%d (%d) going into barrier.\n",
                gtid, hier_level));
      current->barrier(previous_id, tdata);
      KD_TRACE(10,
               ("kmp_hier_t.next_recurse(): T#%d (%d) released and exit %d\n",
                gtid, hier_level, current->get_curr_status(tdata->index)));
    } else {
      KMP_DEBUG_ASSERT(previous_id == 0);
      return status;
    }
    return current->get_curr_status(tdata->index);
  }

public:
  int top_level_nproc;
  int num_layers;
  bool valid;
  int type_size;
  kmp_hier_layer_info_t<T> *info;
  kmp_hier_top_unit_t<T> **layers;
  // Deallocate all memory from this hierarchy
  void deallocate() {
    for (int i = 0; i < num_layers; ++i)
      if (layers[i] != NULL) {
        __kmp_free(layers[i]);
      }
    if (layers != NULL) {
      __kmp_free(layers);
      layers = NULL;
    }
    if (info != NULL) {
      __kmp_free(info);
      info = NULL;
    }
    num_layers = 0;
    valid = false;
  }
  // Returns true if reallocation is needed else false
  bool need_to_reallocate(int n, const kmp_hier_layer_e *new_layers,
                          const enum sched_type *new_scheds,
                          const ST *new_chunks) const {
    if (!valid || layers == NULL || info == NULL ||
        traits_t<T>::type_size != type_size || n != num_layers)
      return true;
    for (int i = 0; i < n; ++i) {
      if (info[i].type != new_layers[i])
        return true;
      if (info[i].sched != new_scheds[i])
        return true;
      if (info[i].chunk != new_chunks[i])
        return true;
    }
    return false;
  }
  // A single thread should call this function while the other threads wait.
  // It creates a new scheduling hierarchy consisting of new_layers,
  // new_scheds and new_chunks. These should come pre-sorted according to
  // kmp_hier_layer_e value. This function will try to avoid reallocation
  // if it can.
  void allocate_hier(int n, const kmp_hier_layer_e *new_layers,
                     const enum sched_type *new_scheds, const ST *new_chunks) {
    top_level_nproc = 0;
    if (!need_to_reallocate(n, new_layers, new_scheds, new_chunks)) {
      KD_TRACE(
          10,
          ("kmp_hier_t<T>::allocate_hier: T#0 do not need to reallocate\n"));
      for (int i = 0; i < n; ++i) {
        info[i].num_active = 0;
        for (int j = 0; j < get_length(i); ++j)
          layers[i][j].active = 0;
      }
      return;
    }
    KD_TRACE(10, ("kmp_hier_t<T>::allocate_hier: T#0 full alloc\n"));
    deallocate();
    type_size = traits_t<T>::type_size;
    num_layers = n;
    info = (kmp_hier_layer_info_t<T> *)__kmp_allocate(
        sizeof(kmp_hier_layer_info_t<T>) * n);
    layers = (kmp_hier_top_unit_t<T> **)__kmp_allocate(
        sizeof(kmp_hier_top_unit_t<T> *) * n);
    for (int i = 0; i < n; ++i) {
      int max = 0;
      kmp_hier_layer_e layer = new_layers[i];
      info[i].num_active = 0;
      info[i].type = layer;
      info[i].sched = new_scheds[i];
      info[i].chunk = new_chunks[i];
      max = __kmp_hier_max_units[layer + 1];
      if (max == 0) {
        valid = false;
        KMP_WARNING(HierSchedInvalid, __kmp_get_hier_str(layer));
        deallocate();
        return;
      }
      info[i].length = max;
      layers[i] = (kmp_hier_top_unit_t<T> *)__kmp_allocate(
          sizeof(kmp_hier_top_unit_t<T>) * max);
      for (int j = 0; j < max; ++j) {
        layers[i][j].active = 0;
      }
    }
    valid = true;
  }
  // loc - source file location
  // gtid - global thread identifier
  // pr - this thread's private dispatch buffer (corresponding with gtid)
  // p_last (return value) - pointer to flag indicating this set of iterations
  //                         contains the last iteration
  // p_lb (return value) - lower bound for this chunk of iterations
  // p_ub (return value) - upper bound for this chunk of iterations
  // p_st (return value) - stride for this chunk of iterations
  //
  // Returns 1 if there are more iterations to perform, 0 otherwise
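  //
  // Illustrative sketch (assumed caller, not part of the runtime): the
  // dispatcher is expected to drive this method roughly like
  //   kmp_int32 last;
  //   T lb, ub;
  //   ST st;
  //   while (hier->next(loc, gtid, pr, &last, &lb, &ub, &st)) {
  //     // execute iterations lb, lb+st, ..., ub on this thread
  //   }
  // where hier is the team's kmp_hier_t<T> and pr is this thread's private
  // dispatch buffer set up by __kmp_dispatch_init_hierarchy().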
  int next(ident_t *loc, int gtid, dispatch_private_info_template<T> *pr,
           kmp_int32 *p_last, T *p_lb, T *p_ub, ST *p_st) {
    int status;
    kmp_int32 contains_last = 0;
    kmp_info_t *th = __kmp_threads[gtid];
    kmp_hier_private_bdata_t *tdata = &(th->th.th_hier_bar_data[0]);
    auto parent = pr->get_parent();
    KMP_DEBUG_ASSERT(parent);
    KMP_DEBUG_ASSERT(th);
    KMP_DEBUG_ASSERT(tdata);
    KMP_DEBUG_ASSERT(parent);
    T nproc = (T)parent->get_num_active();
    T unit_id = (T)pr->get_hier_id();
    KD_TRACE(
        10,
        ("kmp_hier_t.next(): T#%d THREAD LEVEL nproc:%d unit_id:%d called\n",
         gtid, nproc, unit_id));
    // Handthreading implementation
    // Each iteration is performed by all threads on last unit (typically
    // cores/tiles)
    // e.g., threads 0,1,2,3 all execute iteration 0
    //       threads 0,1,2,3 all execute iteration 1
    //       threads 4,5,6,7 all execute iteration 2
    //       threads 4,5,6,7 all execute iteration 3
    //       ... etc.
    if (__kmp_dispatch_hand_threading) {
      KD_TRACE(10,
               ("kmp_hier_t.next(): T#%d THREAD LEVEL using hand threading\n",
                gtid));
      if (unit_id == 0) {
        // For hand threading, the sh buffer on the lowest level is only ever
        // modified and read by the master thread on that level. Because of
        // this, we can always use the first sh buffer.
        auto sh = &(parent->hier_barrier.sh[0]);
        KMP_DEBUG_ASSERT(sh);
        status = __kmp_dispatch_next_algorithm<T>(
            gtid, pr, sh, &contains_last, p_lb, p_ub, p_st, nproc, unit_id);
        if (!status) {
          bool done = false;
          while (!done) {
            done = true;
            status = next_recurse(loc, gtid, parent, &contains_last, p_lb, p_ub,
                                  p_st, unit_id, 0);
            if (status == 1) {
              __kmp_dispatch_init_algorithm(loc, gtid, pr, pr->schedule,
                                            parent->get_next_lb(tdata->index),
                                            parent->get_next_ub(tdata->index),
                                            parent->get_next_st(tdata->index),
#if USE_ITT_BUILD
                                            NULL,
#endif
                                            pr->u.p.parm1, nproc, unit_id);
              sh->u.s.iteration = 0;
              status = __kmp_dispatch_next_algorithm<T>(
                  gtid, pr, sh, &contains_last, p_lb, p_ub, p_st, nproc,
                  unit_id);
              if (!status) {
                KD_TRACE(10,
                         ("kmp_hier_t.next(): T#%d THREAD LEVEL status == 0 "
                          "after next_pr_sh() trying again.\n",
                          gtid));
                done = false;
              }
            } else if (status == 2) {
              KD_TRACE(10, ("kmp_hier_t.next(): T#%d THREAD LEVEL status == 2 "
                            "trying again.\n",
                            gtid));
              done = false;
            }
          }
        }
        parent->set_next_hand_thread(*p_lb, *p_ub, *p_st, status, tdata->index);
      } // if master thread of lowest unit level
      parent->barrier(pr->get_hier_id(), tdata);
      if (unit_id != 0) {
        *p_lb = parent->get_curr_lb(tdata->index);
        *p_ub = parent->get_curr_ub(tdata->index);
        *p_st = parent->get_curr_st(tdata->index);
        status = parent->get_curr_status(tdata->index);
      }
    } else {
      // Normal implementation
      // Each thread grabs an iteration chunk and executes it (no cooperation)
      auto sh = parent->get_curr_sh(tdata->index);
      KMP_DEBUG_ASSERT(sh);
      status = __kmp_dispatch_next_algorithm<T>(
          gtid, pr, sh, &contains_last, p_lb, p_ub, p_st, nproc, unit_id);
      KD_TRACE(10,
               ("kmp_hier_t.next(): T#%d THREAD LEVEL next_algorithm status:%d "
                "contains_last:%d p_lb:%d p_ub:%d p_st:%d\n",
                gtid, status, contains_last, *p_lb, *p_ub, *p_st));
      if (!status) {
        bool done = false;
        while (!done) {
          done = true;
          status = next_recurse(loc, gtid, parent, &contains_last, p_lb, p_ub,
                                p_st, unit_id, 0);
          if (status == 1) {
            sh = parent->get_curr_sh(tdata->index);
            __kmp_dispatch_init_algorithm(loc, gtid, pr, pr->schedule,
                                          parent->get_curr_lb(tdata->index),
                                          parent->get_curr_ub(tdata->index),
                                          parent->get_curr_st(tdata->index),
#if USE_ITT_BUILD
                                          NULL,
#endif
                                          pr->u.p.parm1, nproc, unit_id);
            status = __kmp_dispatch_next_algorithm<T>(
                gtid, pr, sh, &contains_last, p_lb, p_ub, p_st, nproc, unit_id);
            if (!status) {
              KD_TRACE(10, ("kmp_hier_t.next(): T#%d THREAD LEVEL status == 0 "
                            "after next_pr_sh() trying again.\n",
                            gtid));
              done = false;
            }
          } else if (status == 2) {
            KD_TRACE(10, ("kmp_hier_t.next(): T#%d THREAD LEVEL status == 2 "
                          "trying again.\n",
                          gtid));
            done = false;
          }
        }
      }
    }
    if (contains_last && !parent->hier_pr.flags.contains_last) {
      KD_TRACE(10, ("kmp_hier_t.next(): T#%d THREAD LEVEL resetting "
                    "contains_last to FALSE\n",
                    gtid));
      contains_last = FALSE;
    }
    if (p_last)
      *p_last = contains_last;
    KD_TRACE(10, ("kmp_hier_t.next(): T#%d THREAD LEVEL exit status %d\n", gtid,
                  status));
    return status;
  }
  // These functions probe the layer info structure
  // Returns the type of topology unit given level
  kmp_hier_layer_e get_type(int level) const {
    KMP_DEBUG_ASSERT(level >= 0);
    KMP_DEBUG_ASSERT(level < num_layers);
    return info[level].type;
  }
  // Returns the schedule type at given level
  enum sched_type get_sched(int level) const {
    KMP_DEBUG_ASSERT(level >= 0);
    KMP_DEBUG_ASSERT(level < num_layers);
    return info[level].sched;
  }
  // Returns the chunk size at given level
  ST get_chunk(int level) const {
    KMP_DEBUG_ASSERT(level >= 0);
    KMP_DEBUG_ASSERT(level < num_layers);
    return info[level].chunk;
  }
  // Returns the number of active threads at given level
  int get_num_active(int level) const {
    KMP_DEBUG_ASSERT(level >= 0);
    KMP_DEBUG_ASSERT(level < num_layers);
    return info[level].num_active;
  }
  // Returns the length of topology unit array at given level
  int get_length(int level) const {
    KMP_DEBUG_ASSERT(level >= 0);
    KMP_DEBUG_ASSERT(level < num_layers);
    return info[level].length;
  }
  // Returns the topology unit given the level and index
  kmp_hier_top_unit_t<T> *get_unit(int level, int index) {
    KMP_DEBUG_ASSERT(level >= 0);
    KMP_DEBUG_ASSERT(level < num_layers);
    KMP_DEBUG_ASSERT(index >= 0);
    KMP_DEBUG_ASSERT(index < get_length(level));
    return &(layers[level][index]);
  }
  // Returns the number of layers in the hierarchy
  int get_num_layers() const { return num_layers; }
  // Returns the number of threads in the top layer
  // This is necessary because we don't store a topology unit as
  // the very top level and the scheduling algorithms need this information
  int get_top_level_nproc() const { return top_level_nproc; }
  // Return whether this hierarchy is valid or not
  bool is_valid() const { return valid; }
  // Print the hierarchy
  void print() {
    KD_TRACE(10, ("kmp_hier_t:\n"));
    for (int i = num_layers - 1; i >= 0; --i) {
      KD_TRACE(10, ("Info[%d] = ", i));
      info[i].print();
    }
    for (int i = num_layers - 1; i >= 0; --i) {
      KD_TRACE(10, ("Layer[%d] =\n", i));
      for (int j = 0; j < info[i].length; ++j) {
        layers[i][j].print();
      }
    }
  }
};

template <typename T>
void __kmp_dispatch_init_hierarchy(ident_t *loc, int n,
                                   kmp_hier_layer_e *new_layers,
                                   enum sched_type *new_scheds,
                                   typename traits_t<T>::signed_t *new_chunks,
                                   T lb, T ub,
                                   typename traits_t<T>::signed_t st) {
  typedef typename traits_t<T>::signed_t ST;
  typedef typename traits_t<T>::unsigned_t UT;
  int tid, gtid, num_hw_threads, num_threads_per_layer1, active;
  int my_buffer_index;
  kmp_info_t *th;
  kmp_team_t *team;
  dispatch_private_info_template<T> *pr;
  dispatch_shared_info_template<T> volatile *sh;
  gtid = __kmp_entry_gtid();
  tid = __kmp_tid_from_gtid(gtid);
#ifdef KMP_DEBUG
  KD_TRACE(10, ("__kmp_dispatch_init_hierarchy: T#%d called: %d layer(s)\n",
                gtid, n));
  for (int i = 0; i < n; ++i) {
    const char *layer = __kmp_get_hier_str(new_layers[i]);
    KD_TRACE(10, ("__kmp_dispatch_init_hierarchy: T#%d: new_layers[%d] = %s, "
                  "new_scheds[%d] = %d, new_chunks[%d] = %u\n",
                  gtid, i, layer, i, (int)new_scheds[i], i, new_chunks[i]));
  }
#endif // KMP_DEBUG
  KMP_DEBUG_ASSERT(n > 0);
  KMP_DEBUG_ASSERT(new_layers);
  KMP_DEBUG_ASSERT(new_scheds);
  KMP_DEBUG_ASSERT(new_chunks);
  if (!TCR_4(__kmp_init_parallel))
    __kmp_parallel_initialize();
#if OMP_50_ENABLED
  __kmp_resume_if_soft_paused();
#endif

  th = __kmp_threads[gtid];
  team = th->th.th_team;
  active = !team->t.t_serialized;
  th->th.th_ident = loc;
  num_hw_threads = __kmp_hier_max_units[kmp_hier_layer_e::LAYER_THREAD + 1];
  if (!active) {
    KD_TRACE(10, ("__kmp_dispatch_init_hierarchy: T#%d not active parallel. "
                  "Using normal dispatch functions.\n",
                  gtid));
    pr = reinterpret_cast<dispatch_private_info_template<T> *>(
        th->th.th_dispatch->th_disp_buffer);
    KMP_DEBUG_ASSERT(pr);
    pr->flags.use_hier = FALSE;
    pr->flags.contains_last = FALSE;
    return;
  }
  KMP_DEBUG_ASSERT(th->th.th_dispatch ==
                   &th->th.th_team->t.t_dispatch[th->th.th_info.ds.ds_tid]);

  my_buffer_index = th->th.th_dispatch->th_disp_index;
  pr = reinterpret_cast<dispatch_private_info_template<T> *>(
      &th->th.th_dispatch
           ->th_disp_buffer[my_buffer_index % __kmp_dispatch_num_buffers]);
  sh = reinterpret_cast<dispatch_shared_info_template<T> volatile *>(
      &team->t.t_disp_buffer[my_buffer_index % __kmp_dispatch_num_buffers]);
  KMP_DEBUG_ASSERT(pr);
  KMP_DEBUG_ASSERT(sh);
  pr->flags.use_hier = TRUE;
  pr->u.p.tc = 0;
  // Have master allocate the hierarchy
  if (__kmp_tid_from_gtid(gtid) == 0) {
    KD_TRACE(10, ("__kmp_dispatch_init_hierarchy: T#%d pr:%p sh:%p allocating "
                  "hierarchy\n",
                  gtid, pr, sh));
    if (sh->hier == NULL) {
      sh->hier = (kmp_hier_t<T> *)__kmp_allocate(sizeof(kmp_hier_t<T>));
    }
    sh->hier->allocate_hier(n, new_layers, new_scheds, new_chunks);
    sh->u.s.iteration = 0;
  }
  __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
  // Check to make sure the hierarchy is valid
  kmp_hier_t<T> *hier = sh->hier;
  if (!sh->hier->is_valid()) {
    pr->flags.use_hier = FALSE;
    return;
  }
  // Have threads allocate their thread-private barrier data if it hasn't
  // already been allocated
  if (th->th.th_hier_bar_data == NULL) {
    th->th.th_hier_bar_data = (kmp_hier_private_bdata_t *)__kmp_allocate(
        sizeof(kmp_hier_private_bdata_t) * kmp_hier_layer_e::LAYER_LAST);
  }
  // Have threads "register" themselves by modifying the active count for each
  // level they are involved in. The active count will act as nthreads for that
  // level regarding the scheduling algorithms
  for (int i = 0; i < n; ++i) {
    int index = __kmp_dispatch_get_index(tid, hier->get_type(i));
    kmp_hier_top_unit_t<T> *my_unit = hier->get_unit(i, index);
    // Setup the thread's private dispatch buffer's hierarchy pointers
    if (i == 0)
      pr->hier_parent = my_unit;
    // If this unit is already active, then increment active count and wait
    if (my_unit->is_active()) {
      KD_TRACE(10, ("__kmp_dispatch_init_hierarchy: T#%d my_unit (%p) "
                    "is already active (%d)\n",
                    gtid, my_unit, my_unit->active));
      KMP_TEST_THEN_INC32(&(my_unit->active));
      break;
    }
    // Flag that this unit is active
    if (KMP_COMPARE_AND_STORE_ACQ32(&(my_unit->active), 0, 1)) {
      // Do not setup parent pointer for top level unit since it has no parent
      if (i < n - 1) {
        // Setup middle layer pointers to parents
        my_unit->get_my_pr()->hier_id =
            index % __kmp_dispatch_get_t1_per_t2(hier->get_type(i),
                                                 hier->get_type(i + 1));
        int parent_index = __kmp_dispatch_get_index(tid, hier->get_type(i + 1));
        my_unit->hier_parent = hier->get_unit(i + 1, parent_index);
      } else {
        // Setup top layer information (no parent pointers are set)
        my_unit->get_my_pr()->hier_id =
            index % __kmp_dispatch_get_t1_per_t2(hier->get_type(i),
                                                 kmp_hier_layer_e::LAYER_LOOP);
        KMP_TEST_THEN_INC32(&(hier->top_level_nproc));
        my_unit->hier_parent = nullptr;
      }
      // Set trip count to 0 so that next() operation will initially climb up
      // the hierarchy to get more iterations (early exit in next() for tc == 0)
      my_unit->get_my_pr()->u.p.tc = 0;
      // Increment this layer's number of active units
      KMP_TEST_THEN_INC32(&(hier->info[i].num_active));
      KD_TRACE(10, ("__kmp_dispatch_init_hierarchy: T#%d my_unit (%p) "
                    "incrementing num_active\n",
                    gtid, my_unit));
    } else {
      KMP_TEST_THEN_INC32(&(my_unit->active));
      break;
    }
  }
  // Set this thread's id
  num_threads_per_layer1 = __kmp_dispatch_get_t1_per_t2(
      kmp_hier_layer_e::LAYER_THREAD, hier->get_type(0));
  pr->hier_id = tid % num_threads_per_layer1;
  // For oversubscribed threads, increment their index within the lowest unit
  // This is done to prevent having two or more threads with id 0, id 1, etc.
  if (tid >= num_hw_threads)
    pr->hier_id += ((tid / num_hw_threads) * num_threads_per_layer1);
  KD_TRACE(
      10, ("__kmp_dispatch_init_hierarchy: T#%d setting lowest hier_id to %d\n",
           gtid, pr->hier_id));

  pr->flags.contains_last = FALSE;
  __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);

  // Now that the number of active threads at each level is determined,
  // the barrier data for each unit can be initialized and the last layer's
  // loop information can be initialized.
  int prev_id = pr->get_hier_id();
  for (int i = 0; i < n; ++i) {
    if (prev_id != 0)
      break;
    int index = __kmp_dispatch_get_index(tid, hier->get_type(i));
    kmp_hier_top_unit_t<T> *my_unit = hier->get_unit(i, index);
    // Only master threads of this unit within the hierarchy do initialization
    KD_TRACE(10, ("__kmp_dispatch_init_hierarchy: T#%d (%d) prev_id is 0\n",
                  gtid, i));
    my_unit->reset_shared_barrier();
    my_unit->hier_pr.flags.contains_last = FALSE;
    // Last layer, initialize the private buffers with entire loop information
    // Now the next call to next_algorithm() will get the first chunk of
    // iterations properly
    if (i == n - 1) {
      __kmp_dispatch_init_algorithm<T>(
          loc, gtid, my_unit->get_my_pr(), hier->get_sched(i), lb, ub, st,
#if USE_ITT_BUILD
          NULL,
#endif
          hier->get_chunk(i), hier->get_num_active(i), my_unit->get_hier_id());
    }
    prev_id = my_unit->get_hier_id();
  }
  // Initialize each layer of the thread's private barrier data
  kmp_hier_top_unit_t<T> *unit = pr->hier_parent;
  for (int i = 0; i < n && unit; ++i, unit = unit->get_parent()) {
    kmp_hier_private_bdata_t *tdata = &(th->th.th_hier_bar_data[i]);
    unit->reset_private_barrier(tdata);
  }
  __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);

#ifdef KMP_DEBUG
  if (__kmp_tid_from_gtid(gtid) == 0) {
    for (int i = 0; i < n; ++i) {
      KD_TRACE(10,
               ("__kmp_dispatch_init_hierarchy: T#%d active count[%d] = %d\n",
                gtid, i, hier->get_num_active(i)));
    }
    hier->print();
  }
  __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
#endif // KMP_DEBUG
}
#endif