LLVM OpenMP* Runtime Library
kmp.h
1 
2 /*
3  * kmp.h -- KPTS runtime header file.
4  */
5 
6 //===----------------------------------------------------------------------===//
7 //
8 // The LLVM Compiler Infrastructure
9 //
10 // This file is dual licensed under the MIT and the University of Illinois Open
11 // Source Licenses. See LICENSE.txt for details.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #ifndef KMP_H
16 #define KMP_H
17 
18 #include "kmp_config.h"
19 
20 /* #define BUILD_PARALLEL_ORDERED 1 */
21 
22 /* This fix replaces gettimeofday with clock_gettime for better scalability on
23  the Altix. Requires user code to be linked with -lrt. */
24 //#define FIX_SGI_CLOCK
25 
26 /* Defines for OpenMP 3.0 tasking and auto scheduling */
27 
28 #ifndef KMP_STATIC_STEAL_ENABLED
29 #define KMP_STATIC_STEAL_ENABLED 1
30 #endif
31 
32 #define TASK_CURRENT_NOT_QUEUED 0
33 #define TASK_CURRENT_QUEUED 1
34 
35 #ifdef BUILD_TIED_TASK_STACK
36 #define TASK_STACK_EMPTY 0 // entries when the stack is empty
37 #define TASK_STACK_BLOCK_BITS 5 // Used in TASK_STACK_SIZE and TASK_STACK_MASK
38 // Number of entries in each task stack array
39 #define TASK_STACK_BLOCK_SIZE (1 << TASK_STACK_BLOCK_BITS)
40 // Mask for determining index into stack block
41 #define TASK_STACK_INDEX_MASK (TASK_STACK_BLOCK_SIZE - 1)
42 #endif // BUILD_TIED_TASK_STACK
43 
44 #define TASK_NOT_PUSHED 1
45 #define TASK_SUCCESSFULLY_PUSHED 0
46 #define TASK_TIED 1
47 #define TASK_UNTIED 0
48 #define TASK_EXPLICIT 1
49 #define TASK_IMPLICIT 0
50 #define TASK_PROXY 1
51 #define TASK_FULL 0
52 
53 #define KMP_CANCEL_THREADS
54 #define KMP_THREAD_ATTR
55 
56 // Android does not have pthread_cancel. Undefine KMP_CANCEL_THREADS if being
57 // built on Android
58 #if defined(__ANDROID__)
59 #undef KMP_CANCEL_THREADS
60 #endif
61 
62 #include <signal.h>
63 #include <stdarg.h>
64 #include <stddef.h>
65 #include <stdio.h>
66 #include <stdlib.h>
67 #include <string.h>
68 /* Do not include <ctype.h>; it causes problems with /MD on Windows* OS NT due
69  to a bad Microsoft library. Some macros are provided below to replace these functions */
70 #ifndef __ABSOFT_WIN
71 #include <sys/types.h>
72 #endif
73 #include <limits.h>
74 #include <time.h>
75 
76 #include <errno.h>
77 
78 #include "kmp_os.h"
79 
80 #include "kmp_safe_c_api.h"
81 
82 #if KMP_STATS_ENABLED
83 class kmp_stats_list;
84 #endif
85 
86 #if KMP_USE_HIER_SCHED
87 // Only include hierarchical scheduling if affinity is supported
88 #undef KMP_USE_HIER_SCHED
89 #define KMP_USE_HIER_SCHED KMP_AFFINITY_SUPPORTED
90 #endif
91 
92 #if KMP_USE_HWLOC && KMP_AFFINITY_SUPPORTED
93 #include "hwloc.h"
94 #ifndef HWLOC_OBJ_NUMANODE
95 #define HWLOC_OBJ_NUMANODE HWLOC_OBJ_NODE
96 #endif
97 #ifndef HWLOC_OBJ_PACKAGE
98 #define HWLOC_OBJ_PACKAGE HWLOC_OBJ_SOCKET
99 #endif
100 #endif
101 
102 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
103 #include <xmmintrin.h>
104 #endif
105 
106 #include "kmp_debug.h"
107 #include "kmp_lock.h"
108 #include "kmp_version.h"
109 #if USE_DEBUGGER
110 #include "kmp_debugger.h"
111 #endif
112 #include "kmp_i18n.h"
113 
114 #define KMP_HANDLE_SIGNALS (KMP_OS_UNIX || KMP_OS_WINDOWS)
115 
116 #include "kmp_wrapper_malloc.h"
117 #if KMP_OS_UNIX
118 #include <unistd.h>
119 #if !defined NSIG && defined _NSIG
120 #define NSIG _NSIG
121 #endif
122 #endif
123 
124 #if KMP_OS_LINUX
125 #pragma weak clock_gettime
126 #endif
127 
128 #if OMPT_SUPPORT
129 #include "ompt-internal.h"
130 #endif
131 
132 // 0 - no fast memory allocation, alignment: 8-byte on x86, 16-byte on x64.
133 // 3 - fast allocation using sync, non-sync free lists of any size, non-self
134 // free lists of limited size.
135 #ifndef USE_FAST_MEMORY
136 #define USE_FAST_MEMORY 3
137 #endif
138 
139 #ifndef KMP_NESTED_HOT_TEAMS
140 #define KMP_NESTED_HOT_TEAMS 0
141 #define USE_NESTED_HOT_ARG(x)
142 #else
143 #if KMP_NESTED_HOT_TEAMS
144 #if OMP_40_ENABLED
145 #define USE_NESTED_HOT_ARG(x) , x
146 #else
147 // Nested hot teams feature depends on omp 4.0, disable it for earlier versions
148 #undef KMP_NESTED_HOT_TEAMS
149 #define KMP_NESTED_HOT_TEAMS 0
150 #define USE_NESTED_HOT_ARG(x)
151 #endif
152 #else
153 #define USE_NESTED_HOT_ARG(x)
154 #endif
155 #endif
156 
157 // Assume BGET uses a compare_exchange instruction instead of a lock by default.
158 #ifndef USE_CMP_XCHG_FOR_BGET
159 #define USE_CMP_XCHG_FOR_BGET 1
160 #endif
161 
162 // Test to see if queuing lock is better than bootstrap lock for bget
163 // #ifndef USE_QUEUING_LOCK_FOR_BGET
164 // #define USE_QUEUING_LOCK_FOR_BGET
165 // #endif
166 
167 #define KMP_NSEC_PER_SEC 1000000000L
168 #define KMP_USEC_PER_SEC 1000000L
169 
178 enum {
183  /* 0x04 is no longer used */
192  KMP_IDENT_BARRIER_IMPL_MASK = 0x01C0,
193  KMP_IDENT_BARRIER_IMPL_FOR = 0x0040,
194  KMP_IDENT_BARRIER_IMPL_SECTIONS = 0x00C0,
195 
196  KMP_IDENT_BARRIER_IMPL_SINGLE = 0x0140,
197  KMP_IDENT_BARRIER_IMPL_WORKSHARE = 0x01C0,
198 
210  KMP_IDENT_ATOMIC_HINT_UNCONTENDED = 0x010000,
211  KMP_IDENT_ATOMIC_HINT_CONTENDED = 0x020000,
212  KMP_IDENT_ATOMIC_HINT_NONSPECULATIVE = 0x040000,
213  KMP_IDENT_ATOMIC_HINT_SPECULATIVE = 0x080000,
214 };
215 
219 typedef struct ident {
220  kmp_int32 reserved_1;
221  kmp_int32 flags;
223  kmp_int32 reserved_2;
224 #if USE_ITT_BUILD
225 /* but currently used for storing region-specific ITT */
226 /* contextual information. */
227 #endif /* USE_ITT_BUILD */
228  kmp_int32 reserved_3;
229  char const *psource;
233 } ident_t;
238 // Some forward declarations.
239 typedef union kmp_team kmp_team_t;
240 typedef struct kmp_taskdata kmp_taskdata_t;
241 typedef union kmp_task_team kmp_task_team_t;
242 typedef union kmp_team kmp_team_p;
243 typedef union kmp_info kmp_info_p;
244 typedef union kmp_root kmp_root_p;
245 
246 #ifdef __cplusplus
247 extern "C" {
248 #endif
249 
250 /* ------------------------------------------------------------------------ */
251 
252 /* Pack two 32-bit signed integers into a 64-bit signed integer */
253 /* ToDo: Fix word ordering for big-endian machines. */
254 #define KMP_PACK_64(HIGH_32, LOW_32) \
255  ((kmp_int64)((((kmp_uint64)(HIGH_32)) << 32) | (kmp_uint64)(LOW_32)))
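/* Illustrative sketch (not part of the runtime): KMP_PACK_64 places its first
   argument in the upper 32 bits of the result, e.g.
     kmp_int64 packed = KMP_PACK_64(1, 2);          // 0x0000000100000002
     kmp_int32 low    = (kmp_int32)packed;          // 2
     kmp_int32 high   = (kmp_int32)(packed >> 32);  // 1
*/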
256 
257 // Generic string manipulation macros. Assume that _x is of type char *
258 #define SKIP_WS(_x) \
259  { \
260  while (*(_x) == ' ' || *(_x) == '\t') \
261  (_x)++; \
262  }
263 #define SKIP_DIGITS(_x) \
264  { \
265  while (*(_x) >= '0' && *(_x) <= '9') \
266  (_x)++; \
267  }
268 #define SKIP_TOKEN(_x) \
269  { \
270  while ((*(_x) >= '0' && *(_x) <= '9') || (*(_x) >= 'a' && *(_x) <= 'z') || \
271  (*(_x) >= 'A' && *(_x) <= 'Z') || *(_x) == '_') \
272  (_x)++; \
273  }
274 #define SKIP_TO(_x, _c) \
275  { \
276  while (*(_x) != '\0' && *(_x) != (_c)) \
277  (_x)++; \
278  }
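/* Illustrative sketch (not part of the runtime): parsing "  granularity=core,"
   with the macros above; the buffer and variable names are hypothetical.
     char *p = buf;       // buf points at "  granularity=core,..."
     SKIP_WS(p);          // p -> "granularity=core,..."
     SKIP_TOKEN(p);       // p -> "=core,..."
     if (*p == '=') {
       ++p;
       SKIP_TOKEN(p);     // p -> ",..."
     }
     SKIP_TO(p, ',');     // stays at ',' (already there)
*/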
279 
280 /* ------------------------------------------------------------------------ */
281 
282 #define KMP_MAX(x, y) ((x) > (y) ? (x) : (y))
283 #define KMP_MIN(x, y) ((x) < (y) ? (x) : (y))
284 
285 /* ------------------------------------------------------------------------ */
286 /* Enumeration types */
287 
288 enum kmp_state_timer {
289  ts_stop,
290  ts_start,
291  ts_pause,
292 
293  ts_last_state
294 };
295 
296 enum dynamic_mode {
297  dynamic_default,
298 #ifdef USE_LOAD_BALANCE
299  dynamic_load_balance,
300 #endif /* USE_LOAD_BALANCE */
301  dynamic_random,
302  dynamic_thread_limit,
303  dynamic_max
304 };
305 
306 /* external schedule constants; duplicates enum omp_sched from omp.h so that
307  * omp.h need not be included here */
308 #ifndef KMP_SCHED_TYPE_DEFINED
309 #define KMP_SCHED_TYPE_DEFINED
310 typedef enum kmp_sched {
311  kmp_sched_lower = 0, // lower and upper bounds are for routine parameter check
312  // Note: need to adjust __kmp_sch_map global array in case enum is changed
313  kmp_sched_static = 1, // mapped to kmp_sch_static_chunked (33)
314  kmp_sched_dynamic = 2, // mapped to kmp_sch_dynamic_chunked (35)
315  kmp_sched_guided = 3, // mapped to kmp_sch_guided_chunked (36)
316  kmp_sched_auto = 4, // mapped to kmp_sch_auto (38)
317  kmp_sched_upper_std = 5, // upper bound for standard schedules
318  kmp_sched_lower_ext = 100, // lower bound of Intel extension schedules
319  kmp_sched_trapezoidal = 101, // mapped to kmp_sch_trapezoidal (39)
320 #if KMP_STATIC_STEAL_ENABLED
321  kmp_sched_static_steal = 102, // mapped to kmp_sch_static_steal (44)
322 #endif
323  kmp_sched_upper,
324  kmp_sched_default = kmp_sched_static // default scheduling
325 } kmp_sched_t;
326 #endif
327 
332 enum sched_type : kmp_int32 {
334  kmp_sch_static_chunked = 33,
336  kmp_sch_dynamic_chunked = 35,
338  kmp_sch_runtime = 37,
340  kmp_sch_trapezoidal = 39,
341 
342  /* accessible only through KMP_SCHEDULE environment variable */
343  kmp_sch_static_greedy = 40,
344  kmp_sch_static_balanced = 41,
345  /* accessible only through KMP_SCHEDULE environment variable */
346  kmp_sch_guided_iterative_chunked = 42,
347  kmp_sch_guided_analytical_chunked = 43,
348  /* accessible only through KMP_SCHEDULE environment variable */
349  kmp_sch_static_steal = 44,
350 
351 #if OMP_45_ENABLED
352  /* static with chunk adjustment (e.g., simd) */
353  kmp_sch_static_balanced_chunked = 45,
354  kmp_sch_guided_simd = 46,
355  kmp_sch_runtime_simd = 47,
356 #endif
357 
358  /* accessible only through KMP_SCHEDULE environment variable */
362  kmp_ord_static_chunked = 65,
364  kmp_ord_dynamic_chunked = 67,
365  kmp_ord_guided_chunked = 68,
366  kmp_ord_runtime = 69,
368  kmp_ord_trapezoidal = 71,
371 #if OMP_40_ENABLED
372  /* Schedules for Distribute construct */
375 #endif
376 
377  /* For the "nomerge" versions, kmp_dispatch_next*() will always return a
378  single iteration/chunk, even if the loop is serialized. For the schedule
379  types listed above, the entire iteration vector is returned if the loop is
380  serialized. This doesn't work for gcc/gcomp sections. */
381  kmp_nm_lower = 160,
383  kmp_nm_static_chunked =
384  (kmp_sch_static_chunked - kmp_sch_lower + kmp_nm_lower),
386  kmp_nm_dynamic_chunked = 163,
388  kmp_nm_runtime = 165,
389  kmp_nm_auto = 166,
390  kmp_nm_trapezoidal = 167,
391 
392  /* accessible only through KMP_SCHEDULE environment variable */
393  kmp_nm_static_greedy = 168,
394  kmp_nm_static_balanced = 169,
395  /* accessible only through KMP_SCHEDULE environment variable */
396  kmp_nm_guided_iterative_chunked = 170,
397  kmp_nm_guided_analytical_chunked = 171,
398  kmp_nm_static_steal =
399  172, /* accessible only through OMP_SCHEDULE environment variable */
400 
401  kmp_nm_ord_static_chunked = 193,
403  kmp_nm_ord_dynamic_chunked = 195,
404  kmp_nm_ord_guided_chunked = 196,
405  kmp_nm_ord_runtime = 197,
407  kmp_nm_ord_trapezoidal = 199,
410 #if OMP_45_ENABLED
411  /* Support for OpenMP 4.5 monotonic and nonmonotonic schedule modifiers. Since
412  we need to distinguish the three possible cases (no modifier, monotonic
413  modifier, nonmonotonic modifier), we need separate bits for each modifier.
414  The absence of monotonic does not imply nonmonotonic, especially since 4.5
415  says that the behaviour of the "no modifier" case is implementation defined
416  in 4.5, but will become "nonmonotonic" in 5.0.
417 
418  Since we're passing a full 32 bit value, we can use a couple of high bits
419  for these flags; out of paranoia we avoid the sign bit.
420 
421  These modifiers can be or-ed into non-static schedules by the compiler to
422  pass the additional information. They will be stripped early in the
423  processing in __kmp_dispatch_init when setting up schedules, so most of the
424  code won't ever see schedules with these bits set. */
425  kmp_sch_modifier_monotonic =
426  (1 << 29),
427  kmp_sch_modifier_nonmonotonic =
428  (1 << 30),
430 #define SCHEDULE_WITHOUT_MODIFIERS(s) \
431  (enum sched_type)( \
432  (s) & ~(kmp_sch_modifier_nonmonotonic | kmp_sch_modifier_monotonic))
433 #define SCHEDULE_HAS_MONOTONIC(s) (((s)&kmp_sch_modifier_monotonic) != 0)
434 #define SCHEDULE_HAS_NONMONOTONIC(s) (((s)&kmp_sch_modifier_nonmonotonic) != 0)
435 #define SCHEDULE_HAS_NO_MODIFIERS(s) \
436  (((s) & (kmp_sch_modifier_nonmonotonic | kmp_sch_modifier_monotonic)) == 0)
437 #else
438 /* By doing this we hope to avoid multiple tests on OMP_45_ENABLED. Compilers
439  can now eliminate tests on compile time constants and dead code that results
440  from them, so we can leave code guarded by such an if in place. */
441 #define SCHEDULE_WITHOUT_MODIFIERS(s) (s)
442 #define SCHEDULE_HAS_MONOTONIC(s) false
443 #define SCHEDULE_HAS_NONMONOTONIC(s) false
444 #define SCHEDULE_HAS_NO_MODIFIERS(s) true
445 #endif
446 
448 };
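/* Illustrative sketch (not part of the runtime), assuming OMP_45_ENABLED: a
   compiler may OR a modifier bit into a schedule, and the helpers above strip
   or test it:
     enum sched_type s = (enum sched_type)(kmp_sch_dynamic_chunked |
                                           kmp_sch_modifier_nonmonotonic);
     SCHEDULE_WITHOUT_MODIFIERS(s);  // == kmp_sch_dynamic_chunked
     SCHEDULE_HAS_NONMONOTONIC(s);   // true
     SCHEDULE_HAS_MONOTONIC(s);      // false
*/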
449 
450 /* Type to keep runtime schedule set via OMP_SCHEDULE or omp_set_schedule() */
451 typedef union kmp_r_sched {
452  struct {
453  enum sched_type r_sched_type;
454  int chunk;
455  };
456  kmp_int64 sched;
457 } kmp_r_sched_t;
458 
459 extern enum sched_type __kmp_sch_map[]; // map OMP 3.0 schedule types with our
460 // internal schedule types
461 
462 enum library_type {
463  library_none,
464  library_serial,
465  library_turnaround,
466  library_throughput
467 };
468 
469 #if KMP_OS_LINUX
470 enum clock_function_type {
471  clock_function_gettimeofday,
472  clock_function_clock_gettime
473 };
474 #endif /* KMP_OS_LINUX */
475 
476 #if KMP_MIC_SUPPORTED
477 enum mic_type { non_mic, mic1, mic2, mic3, dummy };
478 #endif
479 
480 /* -- fast reduction stuff ------------------------------------------------ */
481 
482 #undef KMP_FAST_REDUCTION_BARRIER
483 #define KMP_FAST_REDUCTION_BARRIER 1
484 
485 #undef KMP_FAST_REDUCTION_CORE_DUO
486 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
487 #define KMP_FAST_REDUCTION_CORE_DUO 1
488 #endif
489 
490 enum _reduction_method {
491  reduction_method_not_defined = 0,
492  critical_reduce_block = (1 << 8),
493  atomic_reduce_block = (2 << 8),
494  tree_reduce_block = (3 << 8),
495  empty_reduce_block = (4 << 8)
496 };
497 
498 // Description of the packed_reduction_method variable:
499 // The packed_reduction_method variable consists of two enum values packed
500 // together into the 0-th byte and the 1-st byte:
501 // 0: (packed_reduction_method & 0x000000FF) is the 'enum barrier_type' value
502 // of the barrier that will be used in fast reduction: bs_plain_barrier or
503 // bs_reduction_barrier
504 // 1: (packed_reduction_method & 0x0000FF00) is the reduction method that will
505 // be used in fast reduction;
506 // The reduction method is of type 'enum _reduction_method' and is defined so
507 // that the bits of the 0-th byte are empty, so no shift instruction is needed
508 // while packing/unpacking
509 
510 #if KMP_FAST_REDUCTION_BARRIER
511 #define PACK_REDUCTION_METHOD_AND_BARRIER(reduction_method, barrier_type) \
512  ((reduction_method) | (barrier_type))
513 
514 #define UNPACK_REDUCTION_METHOD(packed_reduction_method) \
515  ((enum _reduction_method)((packed_reduction_method) & (0x0000FF00)))
516 
517 #define UNPACK_REDUCTION_BARRIER(packed_reduction_method) \
518  ((enum barrier_type)((packed_reduction_method) & (0x000000FF)))
519 #else
520 #define PACK_REDUCTION_METHOD_AND_BARRIER(reduction_method, barrier_type) \
521  (reduction_method)
522 
523 #define UNPACK_REDUCTION_METHOD(packed_reduction_method) \
524  (packed_reduction_method)
525 
526 #define UNPACK_REDUCTION_BARRIER(packed_reduction_method) (bs_plain_barrier)
527 #endif
528 
529 #define TEST_REDUCTION_METHOD(packed_reduction_method, which_reduction_block) \
530  ((UNPACK_REDUCTION_METHOD(packed_reduction_method)) == \
531  (which_reduction_block))
532 
533 #if KMP_FAST_REDUCTION_BARRIER
534 #define TREE_REDUCE_BLOCK_WITH_REDUCTION_BARRIER \
535  (PACK_REDUCTION_METHOD_AND_BARRIER(tree_reduce_block, bs_reduction_barrier))
536 
537 #define TREE_REDUCE_BLOCK_WITH_PLAIN_BARRIER \
538  (PACK_REDUCTION_METHOD_AND_BARRIER(tree_reduce_block, bs_plain_barrier))
539 #endif
540 
541 typedef int PACKED_REDUCTION_METHOD_T;
542 
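/* Illustrative sketch (not part of the runtime): with KMP_FAST_REDUCTION_BARRIER
   enabled, a reduction method and its barrier type pack into one value and
   unpack again:
     PACKED_REDUCTION_METHOD_T prm =
         PACK_REDUCTION_METHOD_AND_BARRIER(tree_reduce_block, bs_reduction_barrier);
     UNPACK_REDUCTION_METHOD(prm);                   // == tree_reduce_block
     UNPACK_REDUCTION_BARRIER(prm);                  // == bs_reduction_barrier
     TEST_REDUCTION_METHOD(prm, tree_reduce_block);  // true
*/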
543 /* -- end of fast reduction stuff ----------------------------------------- */
544 
545 #if KMP_OS_WINDOWS
546 #define USE_CBLKDATA
547 #pragma warning(push)
548 #pragma warning(disable : 271 310)
549 #include <windows.h>
550 #pragma warning(pop)
551 #endif
552 
553 #if KMP_OS_UNIX
554 #include <dlfcn.h>
555 #include <pthread.h>
556 #endif
557 
558 /* Only Linux* OS and Windows* OS support thread affinity. */
559 #if KMP_AFFINITY_SUPPORTED
560 
561 // GROUP_AFFINITY is already defined for _MSC_VER>=1600 (VS2010 and later).
562 #if KMP_OS_WINDOWS
563 #if _MSC_VER < 1600
564 typedef struct GROUP_AFFINITY {
565  KAFFINITY Mask;
566  WORD Group;
567  WORD Reserved[3];
568 } GROUP_AFFINITY;
569 #endif /* _MSC_VER < 1600 */
570 #if KMP_GROUP_AFFINITY
571 extern int __kmp_num_proc_groups;
572 #else
573 static const int __kmp_num_proc_groups = 1;
574 #endif /* KMP_GROUP_AFFINITY */
575 typedef DWORD (*kmp_GetActiveProcessorCount_t)(WORD);
576 extern kmp_GetActiveProcessorCount_t __kmp_GetActiveProcessorCount;
577 
578 typedef WORD (*kmp_GetActiveProcessorGroupCount_t)(void);
579 extern kmp_GetActiveProcessorGroupCount_t __kmp_GetActiveProcessorGroupCount;
580 
581 typedef BOOL (*kmp_GetThreadGroupAffinity_t)(HANDLE, GROUP_AFFINITY *);
582 extern kmp_GetThreadGroupAffinity_t __kmp_GetThreadGroupAffinity;
583 
584 typedef BOOL (*kmp_SetThreadGroupAffinity_t)(HANDLE, const GROUP_AFFINITY *,
585  GROUP_AFFINITY *);
586 extern kmp_SetThreadGroupAffinity_t __kmp_SetThreadGroupAffinity;
587 #endif /* KMP_OS_WINDOWS */
588 
589 #if KMP_USE_HWLOC
590 extern hwloc_topology_t __kmp_hwloc_topology;
591 extern int __kmp_hwloc_error;
592 extern int __kmp_numa_detected;
593 extern int __kmp_tile_depth;
594 #endif
595 
596 extern size_t __kmp_affin_mask_size;
597 #define KMP_AFFINITY_CAPABLE() (__kmp_affin_mask_size > 0)
598 #define KMP_AFFINITY_DISABLE() (__kmp_affin_mask_size = 0)
599 #define KMP_AFFINITY_ENABLE(mask_size) (__kmp_affin_mask_size = mask_size)
600 #define KMP_CPU_SET_ITERATE(i, mask) \
601  for (i = (mask)->begin(); (int)i != (mask)->end(); i = (mask)->next(i))
602 #define KMP_CPU_SET(i, mask) (mask)->set(i)
603 #define KMP_CPU_ISSET(i, mask) (mask)->is_set(i)
604 #define KMP_CPU_CLR(i, mask) (mask)->clear(i)
605 #define KMP_CPU_ZERO(mask) (mask)->zero()
606 #define KMP_CPU_COPY(dest, src) (dest)->copy(src)
607 #define KMP_CPU_AND(dest, src) (dest)->bitwise_and(src)
608 #define KMP_CPU_COMPLEMENT(max_bit_number, mask) (mask)->bitwise_not()
609 #define KMP_CPU_UNION(dest, src) (dest)->bitwise_or(src)
610 #define KMP_CPU_ALLOC(ptr) (ptr = __kmp_affinity_dispatch->allocate_mask())
611 #define KMP_CPU_FREE(ptr) __kmp_affinity_dispatch->deallocate_mask(ptr)
612 #define KMP_CPU_ALLOC_ON_STACK(ptr) KMP_CPU_ALLOC(ptr)
613 #define KMP_CPU_FREE_FROM_STACK(ptr) KMP_CPU_FREE(ptr)
614 #define KMP_CPU_INTERNAL_ALLOC(ptr) KMP_CPU_ALLOC(ptr)
615 #define KMP_CPU_INTERNAL_FREE(ptr) KMP_CPU_FREE(ptr)
616 #define KMP_CPU_INDEX(arr, i) __kmp_affinity_dispatch->index_mask_array(arr, i)
617 #define KMP_CPU_ALLOC_ARRAY(arr, n) \
618  (arr = __kmp_affinity_dispatch->allocate_mask_array(n))
619 #define KMP_CPU_FREE_ARRAY(arr, n) \
620  __kmp_affinity_dispatch->deallocate_mask_array(arr)
621 #define KMP_CPU_INTERNAL_ALLOC_ARRAY(arr, n) KMP_CPU_ALLOC_ARRAY(arr, n)
622 #define KMP_CPU_INTERNAL_FREE_ARRAY(arr, n) KMP_CPU_FREE_ARRAY(arr, n)
623 #define __kmp_get_system_affinity(mask, abort_bool) \
624  (mask)->get_system_affinity(abort_bool)
625 #define __kmp_set_system_affinity(mask, abort_bool) \
626  (mask)->set_system_affinity(abort_bool)
627 #define __kmp_get_proc_group(mask) (mask)->get_proc_group()
628 
629 class KMPAffinity {
630 public:
631  class Mask {
632  public:
633  void *operator new(size_t n);
634  void operator delete(void *p);
635  void *operator new[](size_t n);
636  void operator delete[](void *p);
637  virtual ~Mask() {}
638  // Set bit i to 1
639  virtual void set(int i) {}
640  // Return bit i
641  virtual bool is_set(int i) const { return false; }
642  // Set bit i to 0
643  virtual void clear(int i) {}
644  // Zero out entire mask
645  virtual void zero() {}
646  // Copy src into this mask
647  virtual void copy(const Mask *src) {}
648  // this &= rhs
649  virtual void bitwise_and(const Mask *rhs) {}
650  // this |= rhs
651  virtual void bitwise_or(const Mask *rhs) {}
652  // this = ~this
653  virtual void bitwise_not() {}
654  // API for iterating over an affinity mask
655  // for (int i = mask->begin(); i != mask->end(); i = mask->next(i))
656  virtual int begin() const { return 0; }
657  virtual int end() const { return 0; }
658  virtual int next(int previous) const { return 0; }
659  // Set the system's affinity to this affinity mask's value
660  virtual int set_system_affinity(bool abort_on_error) const { return -1; }
661  // Set this affinity mask to the current system affinity
662  virtual int get_system_affinity(bool abort_on_error) { return -1; }
663  // Only 1 DWORD in the mask should have any procs set.
664  // Return the appropriate index, or -1 for an invalid mask.
665  virtual int get_proc_group() const { return -1; }
666  };
667  void *operator new(size_t n);
668  void operator delete(void *p);
669  // Need virtual destructor
670  virtual ~KMPAffinity() = default;
671  // Determine if affinity is capable
672  virtual void determine_capable(const char *env_var) {}
673  // Bind the current thread to os proc
674  virtual void bind_thread(int proc) {}
675  // Factory functions to allocate/deallocate a mask
676  virtual Mask *allocate_mask() { return nullptr; }
677  virtual void deallocate_mask(Mask *m) {}
678  virtual Mask *allocate_mask_array(int num) { return nullptr; }
679  virtual void deallocate_mask_array(Mask *m) {}
680  virtual Mask *index_mask_array(Mask *m, int index) { return nullptr; }
681  static void pick_api();
682  static void destroy_api();
683  enum api_type {
684  NATIVE_OS
685 #if KMP_USE_HWLOC
686  ,
687  HWLOC
688 #endif
689  };
690  virtual api_type get_api_type() const {
691  KMP_ASSERT(0);
692  return NATIVE_OS;
693  }
694 
695 private:
696  static bool picked_api;
697 };
698 
699 typedef KMPAffinity::Mask kmp_affin_mask_t;
700 extern KMPAffinity *__kmp_affinity_dispatch;
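/* Illustrative sketch (not part of the runtime): allocating a mask, setting a
   couple of OS procs, and iterating over the set bits with the wrappers above.
     kmp_affin_mask_t *mask;
     int i;
     KMP_CPU_ALLOC(mask);
     KMP_CPU_ZERO(mask);
     KMP_CPU_SET(0, mask);
     KMP_CPU_SET(2, mask);
     KMP_CPU_SET_ITERATE(i, mask) {
       // visits i == 0 and i == 2
     }
     KMP_CPU_FREE(mask);
*/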
701 
702 // Declare local char buffers with this size for printing debug and info
703 // messages, using __kmp_affinity_print_mask().
704 #define KMP_AFFIN_MASK_PRINT_LEN 1024
705 
706 enum affinity_type {
707  affinity_none = 0,
708  affinity_physical,
709  affinity_logical,
710  affinity_compact,
711  affinity_scatter,
712  affinity_explicit,
713  affinity_balanced,
714  affinity_disabled, // not used outside the env var parser
715  affinity_default
716 };
717 
718 enum affinity_gran {
719  affinity_gran_fine = 0,
720  affinity_gran_thread,
721  affinity_gran_core,
722  affinity_gran_tile,
723  affinity_gran_numa,
724  affinity_gran_package,
725  affinity_gran_node,
726 #if KMP_GROUP_AFFINITY
727  // The "group" granularity isn't necessarily coarser than all of the
728  // other levels, but we put it last in the enum.
729  affinity_gran_group,
730 #endif /* KMP_GROUP_AFFINITY */
731  affinity_gran_default
732 };
733 
734 enum affinity_top_method {
735  affinity_top_method_all = 0, // try all (supported) methods, in order
736 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
737  affinity_top_method_apicid,
738  affinity_top_method_x2apicid,
739 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
740  affinity_top_method_cpuinfo, // KMP_CPUINFO_FILE is usable on Windows* OS, too
741 #if KMP_GROUP_AFFINITY
742  affinity_top_method_group,
743 #endif /* KMP_GROUP_AFFINITY */
744  affinity_top_method_flat,
745 #if KMP_USE_HWLOC
746  affinity_top_method_hwloc,
747 #endif
748  affinity_top_method_default
749 };
750 
751 #define affinity_respect_mask_default (-1)
752 
753 extern enum affinity_type __kmp_affinity_type; /* Affinity type */
754 extern enum affinity_gran __kmp_affinity_gran; /* Affinity granularity */
755 extern int __kmp_affinity_gran_levels; /* corresponding int value */
756 extern int __kmp_affinity_dups; /* Affinity duplicate masks */
757 extern enum affinity_top_method __kmp_affinity_top_method;
758 extern int __kmp_affinity_compact; /* Affinity 'compact' value */
759 extern int __kmp_affinity_offset; /* Affinity offset value */
760 extern int __kmp_affinity_verbose; /* Was verbose specified for KMP_AFFINITY? */
761 extern int __kmp_affinity_warnings; /* KMP_AFFINITY warnings enabled ? */
762 extern int __kmp_affinity_respect_mask; // Respect process' init affinity mask?
763 extern char *__kmp_affinity_proclist; /* proc ID list */
764 extern kmp_affin_mask_t *__kmp_affinity_masks;
765 extern unsigned __kmp_affinity_num_masks;
766 extern void __kmp_affinity_bind_thread(int which);
767 
768 extern kmp_affin_mask_t *__kmp_affin_fullMask;
769 extern char *__kmp_cpuinfo_file;
770 
771 #endif /* KMP_AFFINITY_SUPPORTED */
772 
773 #if OMP_40_ENABLED
774 
775 // This needs to be kept in sync with the values in omp.h !!!
776 typedef enum kmp_proc_bind_t {
777  proc_bind_false = 0,
778  proc_bind_true,
779  proc_bind_master,
780  proc_bind_close,
781  proc_bind_spread,
782  proc_bind_intel, // use KMP_AFFINITY interface
783  proc_bind_default
784 } kmp_proc_bind_t;
785 
786 typedef struct kmp_nested_proc_bind_t {
787  kmp_proc_bind_t *bind_types;
788  int size;
789  int used;
790 } kmp_nested_proc_bind_t;
791 
792 extern kmp_nested_proc_bind_t __kmp_nested_proc_bind;
793 
794 #endif /* OMP_40_ENABLED */
795 
796 #if KMP_AFFINITY_SUPPORTED
797 #define KMP_PLACE_ALL (-1)
798 #define KMP_PLACE_UNDEFINED (-2)
799 // Is KMP_AFFINITY being used instead of OMP_PROC_BIND/OMP_PLACES?
800 #define KMP_AFFINITY_NON_PROC_BIND \
801  ((__kmp_nested_proc_bind.bind_types[0] == proc_bind_false || \
802  __kmp_nested_proc_bind.bind_types[0] == proc_bind_intel) && \
803  (__kmp_affinity_num_masks > 0 || __kmp_affinity_type == affinity_balanced))
804 #endif /* KMP_AFFINITY_SUPPORTED */
805 
806 extern int __kmp_affinity_num_places;
807 
808 #if OMP_40_ENABLED
809 typedef enum kmp_cancel_kind_t {
810  cancel_noreq = 0,
811  cancel_parallel = 1,
812  cancel_loop = 2,
813  cancel_sections = 3,
814  cancel_taskgroup = 4
815 } kmp_cancel_kind_t;
816 #endif // OMP_40_ENABLED
817 
818 // KMP_HW_SUBSET support:
819 typedef struct kmp_hws_item {
820  int num;
821  int offset;
822 } kmp_hws_item_t;
823 
824 extern kmp_hws_item_t __kmp_hws_socket;
825 extern kmp_hws_item_t __kmp_hws_node;
826 extern kmp_hws_item_t __kmp_hws_tile;
827 extern kmp_hws_item_t __kmp_hws_core;
828 extern kmp_hws_item_t __kmp_hws_proc;
829 extern int __kmp_hws_requested;
830 extern int __kmp_hws_abs_flag; // absolute or per-item number requested
831 
832 /* ------------------------------------------------------------------------ */
833 
834 #define KMP_PAD(type, sz) \
835  (sizeof(type) + (sz - ((sizeof(type) - 1) % (sz)) - 1))
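/* Illustrative note (not part of the runtime): KMP_PAD rounds sizeof(type) up
   to the next multiple of sz, e.g. for a hypothetical 20-byte type and sz == 16:
     20 + (16 - ((20 - 1) % 16) - 1) == 20 + (16 - 3 - 1) == 32 */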
836 
837 // We need to avoid using -1 as a GTID as +1 is added to the gtid
838 // when storing it in a lock, and the value 0 is reserved.
839 #define KMP_GTID_DNE (-2) /* Does not exist */
840 #define KMP_GTID_SHUTDOWN (-3) /* Library is shutting down */
841 #define KMP_GTID_MONITOR (-4) /* Monitor thread ID */
842 #define KMP_GTID_UNKNOWN (-5) /* Is not known */
843 #define KMP_GTID_MIN (-6) /* Minimal gtid for low bound check in DEBUG */
844 
845 #if OMP_50_ENABLED
846 /* OpenMP 5.0 Memory Management support */
847 extern int __kmp_memkind_available;
848 extern int __kmp_hbw_mem_available;
849 typedef void *omp_allocator_t;
850 extern const omp_allocator_t *OMP_NULL_ALLOCATOR;
851 extern const omp_allocator_t *omp_default_mem_alloc;
852 extern const omp_allocator_t *omp_large_cap_mem_alloc;
853 extern const omp_allocator_t *omp_const_mem_alloc;
854 extern const omp_allocator_t *omp_high_bw_mem_alloc;
855 extern const omp_allocator_t *omp_low_lat_mem_alloc;
856 extern const omp_allocator_t *omp_cgroup_mem_alloc;
857 extern const omp_allocator_t *omp_pteam_mem_alloc;
858 extern const omp_allocator_t *omp_thread_mem_alloc;
859 extern const omp_allocator_t *__kmp_def_allocator;
860 
861 extern void __kmpc_set_default_allocator(int gtid, const omp_allocator_t *al);
862 extern const omp_allocator_t *__kmpc_get_default_allocator(int gtid);
863 extern void *__kmpc_alloc(int gtid, size_t sz, const omp_allocator_t *al);
864 extern void __kmpc_free(int gtid, void *ptr, const omp_allocator_t *al);
865 
866 extern void __kmp_init_memkind();
867 extern void __kmp_fini_memkind();
868 #endif // OMP_50_ENABLED
869 
870 /* ------------------------------------------------------------------------ */
871 
872 #define KMP_UINT64_MAX \
873  (~((kmp_uint64)1 << ((sizeof(kmp_uint64) * (1 << 3)) - 1)))
874 
875 #define KMP_MIN_NTH 1
876 
877 #ifndef KMP_MAX_NTH
878 #if defined(PTHREAD_THREADS_MAX) && PTHREAD_THREADS_MAX < INT_MAX
879 #define KMP_MAX_NTH PTHREAD_THREADS_MAX
880 #else
881 #define KMP_MAX_NTH INT_MAX
882 #endif
883 #endif /* KMP_MAX_NTH */
884 
885 #ifdef PTHREAD_STACK_MIN
886 #define KMP_MIN_STKSIZE PTHREAD_STACK_MIN
887 #else
888 #define KMP_MIN_STKSIZE ((size_t)(32 * 1024))
889 #endif
890 
891 #define KMP_MAX_STKSIZE (~((size_t)1 << ((sizeof(size_t) * (1 << 3)) - 1)))
892 
893 #if KMP_ARCH_X86
894 #define KMP_DEFAULT_STKSIZE ((size_t)(2 * 1024 * 1024))
895 #elif KMP_ARCH_X86_64
896 #define KMP_DEFAULT_STKSIZE ((size_t)(4 * 1024 * 1024))
897 #define KMP_BACKUP_STKSIZE ((size_t)(2 * 1024 * 1024))
898 #else
899 #define KMP_DEFAULT_STKSIZE ((size_t)(1024 * 1024))
900 #endif
901 
902 #define KMP_DEFAULT_MALLOC_POOL_INCR ((size_t)(1024 * 1024))
903 #define KMP_MIN_MALLOC_POOL_INCR ((size_t)(4 * 1024))
904 #define KMP_MAX_MALLOC_POOL_INCR \
905  (~((size_t)1 << ((sizeof(size_t) * (1 << 3)) - 1)))
906 
907 #define KMP_MIN_STKOFFSET (0)
908 #define KMP_MAX_STKOFFSET KMP_MAX_STKSIZE
909 #if KMP_OS_DARWIN
910 #define KMP_DEFAULT_STKOFFSET KMP_MIN_STKOFFSET
911 #else
912 #define KMP_DEFAULT_STKOFFSET CACHE_LINE
913 #endif
914 
915 #define KMP_MIN_STKPADDING (0)
916 #define KMP_MAX_STKPADDING (2 * 1024 * 1024)
917 
918 #define KMP_BLOCKTIME_MULTIPLIER \
919  (1000) /* number of blocktime units per second */
920 #define KMP_MIN_BLOCKTIME (0)
921 #define KMP_MAX_BLOCKTIME \
922  (INT_MAX) /* Must be this for the "infinite" setting to work */
923 #define KMP_DEFAULT_BLOCKTIME (200) /* __kmp_blocktime is in milliseconds */
924 
925 #if KMP_USE_MONITOR
926 #define KMP_DEFAULT_MONITOR_STKSIZE ((size_t)(64 * 1024))
927 #define KMP_MIN_MONITOR_WAKEUPS (1) // min times monitor wakes up per second
928 #define KMP_MAX_MONITOR_WAKEUPS (1000) // max times monitor can wake up per sec
929 
930 /* Calculate new number of monitor wakeups for a specific block time based on
931  previous monitor_wakeups. Only allow increasing the number of wakeups */
932 #define KMP_WAKEUPS_FROM_BLOCKTIME(blocktime, monitor_wakeups) \
933  (((blocktime) == KMP_MAX_BLOCKTIME) \
934  ? (monitor_wakeups) \
935  : ((blocktime) == KMP_MIN_BLOCKTIME) \
936  ? KMP_MAX_MONITOR_WAKEUPS \
937  : ((monitor_wakeups) > (KMP_BLOCKTIME_MULTIPLIER / (blocktime))) \
938  ? (monitor_wakeups) \
939  : (KMP_BLOCKTIME_MULTIPLIER) / (blocktime))
940 
941 /* Calculate number of intervals for a specific block time based on
942  monitor_wakeups */
943 #define KMP_INTERVALS_FROM_BLOCKTIME(blocktime, monitor_wakeups) \
944  (((blocktime) + (KMP_BLOCKTIME_MULTIPLIER / (monitor_wakeups)) - 1) / \
945  (KMP_BLOCKTIME_MULTIPLIER / (monitor_wakeups)))
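/* Illustrative note (not part of the runtime): with the default blocktime of
   200 ms and an initial monitor_wakeups of 1 (KMP_MIN_MONITOR_WAKEUPS):
     KMP_WAKEUPS_FROM_BLOCKTIME(200, 1)   == 1000 / 200 == 5 wakeups per second
     KMP_INTERVALS_FROM_BLOCKTIME(200, 5) == (200 + 200 - 1) / 200 == 1 interval
*/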
946 #else
947 #define KMP_BLOCKTIME(team, tid) \
948  (get__bt_set(team, tid) ? get__blocktime(team, tid) : __kmp_dflt_blocktime)
949 #if KMP_OS_UNIX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)
950 // HW TSC is used to reduce overhead (clock tick instead of nanosecond).
951 extern kmp_uint64 __kmp_ticks_per_msec;
952 #if KMP_COMPILER_ICC
953 #define KMP_NOW() ((kmp_uint64)_rdtsc())
954 #else
955 #define KMP_NOW() __kmp_hardware_timestamp()
956 #endif
957 #define KMP_NOW_MSEC() (KMP_NOW() / __kmp_ticks_per_msec)
958 #define KMP_BLOCKTIME_INTERVAL(team, tid) \
959  (KMP_BLOCKTIME(team, tid) * __kmp_ticks_per_msec)
960 #define KMP_BLOCKING(goal, count) ((goal) > KMP_NOW())
961 #else
962 // System time is retrieved sporadically while blocking.
963 extern kmp_uint64 __kmp_now_nsec();
964 #define KMP_NOW() __kmp_now_nsec()
965 #define KMP_NOW_MSEC() (KMP_NOW() / KMP_USEC_PER_SEC)
966 #define KMP_BLOCKTIME_INTERVAL(team, tid) \
967  (KMP_BLOCKTIME(team, tid) * KMP_USEC_PER_SEC)
968 #define KMP_BLOCKING(goal, count) ((count) % 1000 != 0 || (goal) > KMP_NOW())
969 #endif
970 #define KMP_YIELD_NOW() \
971  (KMP_NOW_MSEC() / KMP_MAX(__kmp_dflt_blocktime, 1) % \
972  (__kmp_yield_on_count + __kmp_yield_off_count) < \
973  (kmp_uint32)__kmp_yield_on_count)
974 #endif // KMP_USE_MONITOR
975 
976 #define KMP_MIN_STATSCOLS 40
977 #define KMP_MAX_STATSCOLS 4096
978 #define KMP_DEFAULT_STATSCOLS 80
979 
980 #define KMP_MIN_INTERVAL 0
981 #define KMP_MAX_INTERVAL (INT_MAX - 1)
982 #define KMP_DEFAULT_INTERVAL 0
983 
984 #define KMP_MIN_CHUNK 1
985 #define KMP_MAX_CHUNK (INT_MAX - 1)
986 #define KMP_DEFAULT_CHUNK 1
987 
988 #define KMP_MIN_INIT_WAIT 1
989 #define KMP_MAX_INIT_WAIT (INT_MAX / 2)
990 #define KMP_DEFAULT_INIT_WAIT 2048U
991 
992 #define KMP_MIN_NEXT_WAIT 1
993 #define KMP_MAX_NEXT_WAIT (INT_MAX / 2)
994 #define KMP_DEFAULT_NEXT_WAIT 1024U
995 
996 #define KMP_DFLT_DISP_NUM_BUFF 7
997 #define KMP_MAX_ORDERED 8
998 
999 #define KMP_MAX_FIELDS 32
1000 
1001 #define KMP_MAX_BRANCH_BITS 31
1002 
1003 #define KMP_MAX_ACTIVE_LEVELS_LIMIT INT_MAX
1004 
1005 #define KMP_MAX_DEFAULT_DEVICE_LIMIT INT_MAX
1006 
1007 #define KMP_MAX_TASK_PRIORITY_LIMIT INT_MAX
1008 
1009 /* Minimum number of threads before switch to TLS gtid (experimentally
1010  determined) */
1011 /* josh TODO: what about OS X* tuning? */
1012 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
1013 #define KMP_TLS_GTID_MIN 5
1014 #else
1015 #define KMP_TLS_GTID_MIN INT_MAX
1016 #endif
1017 
1018 #define KMP_MASTER_TID(tid) ((tid) == 0)
1019 #define KMP_WORKER_TID(tid) ((tid) != 0)
1020 
1021 #define KMP_MASTER_GTID(gtid) (__kmp_tid_from_gtid((gtid)) == 0)
1022 #define KMP_WORKER_GTID(gtid) (__kmp_tid_from_gtid((gtid)) != 0)
1023 #define KMP_INITIAL_GTID(gtid) ((gtid) == 0)
1024 
1025 #ifndef TRUE
1026 #define FALSE 0
1027 #define TRUE (!FALSE)
1028 #endif
1029 
1030 /* NOTE: all of the following constants must be even */
1031 
1032 #if KMP_OS_WINDOWS
1033 #define KMP_INIT_WAIT 64U /* initial number of spin-tests */
1034 #define KMP_NEXT_WAIT 32U /* subsequent number of spin-tests */
1035 #elif KMP_OS_CNK
1036 #define KMP_INIT_WAIT 16U /* initial number of spin-tests */
1037 #define KMP_NEXT_WAIT 8U /* subsequent number of spin-tests */
1038 #elif KMP_OS_LINUX
1039 #define KMP_INIT_WAIT 1024U /* initial number of spin-tests */
1040 #define KMP_NEXT_WAIT 512U /* subsequent number of spin-tests */
1041 #elif KMP_OS_DARWIN
1042 /* TODO: tune for KMP_OS_DARWIN */
1043 #define KMP_INIT_WAIT 1024U /* initial number of spin-tests */
1044 #define KMP_NEXT_WAIT 512U /* subsequent number of spin-tests */
1045 #elif KMP_OS_FREEBSD
1046 /* TODO: tune for KMP_OS_FREEBSD */
1047 #define KMP_INIT_WAIT 1024U /* initial number of spin-tests */
1048 #define KMP_NEXT_WAIT 512U /* subsequent number of spin-tests */
1049 #elif KMP_OS_NETBSD
1050 /* TODO: tune for KMP_OS_NETBSD */
1051 #define KMP_INIT_WAIT 1024U /* initial number of spin-tests */
1052 #define KMP_NEXT_WAIT 512U /* subsequent number of spin-tests */
1053 #elif KMP_OS_HURD
1054 /* TODO: tune for KMP_OS_HURD */
1055 #define KMP_INIT_WAIT 1024U /* initial number of spin-tests */
1056 #define KMP_NEXT_WAIT 512U /* subsequent number of spin-tests */
1057 #endif
1058 
1059 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
1060 typedef struct kmp_cpuid {
1061  kmp_uint32 eax;
1062  kmp_uint32 ebx;
1063  kmp_uint32 ecx;
1064  kmp_uint32 edx;
1065 } kmp_cpuid_t;
1066 extern void __kmp_x86_cpuid(int mode, int mode2, struct kmp_cpuid *p);
1067 #if KMP_ARCH_X86
1068 extern void __kmp_x86_pause(void);
1069 #elif KMP_MIC
1070 // Performance testing on KNC (C0QS-7120 P/A/X/D, 61-core, 16 GB Memory) showed
1071 // regression after removal of extra PAUSE from KMP_YIELD_SPIN(). Changing
1072 // the delay from 100 to 300 showed even better performance than double PAUSE
1073 // on Spec OMP2001 and LCPC tasking tests, no regressions on EPCC.
1074 static inline void __kmp_x86_pause(void) { _mm_delay_32(300); }
1075 #else
1076 static inline void __kmp_x86_pause(void) { _mm_pause(); }
1077 #endif
1078 #define KMP_CPU_PAUSE() __kmp_x86_pause()
1079 #elif KMP_ARCH_PPC64
1080 #define KMP_PPC64_PRI_LOW() __asm__ volatile("or 1, 1, 1")
1081 #define KMP_PPC64_PRI_MED() __asm__ volatile("or 2, 2, 2")
1082 #define KMP_PPC64_PRI_LOC_MB() __asm__ volatile("" : : : "memory")
1083 #define KMP_CPU_PAUSE() \
1084  do { \
1085  KMP_PPC64_PRI_LOW(); \
1086  KMP_PPC64_PRI_MED(); \
1087  KMP_PPC64_PRI_LOC_MB(); \
1088  } while (0)
1089 #else
1090 #define KMP_CPU_PAUSE() /* nothing to do */
1091 #endif
1092 
1093 #define KMP_INIT_YIELD(count) \
1094  { (count) = __kmp_yield_init; }
1095 
1096 #define KMP_YIELD(cond) \
1097  { \
1098  KMP_CPU_PAUSE(); \
1099  __kmp_yield((cond)); \
1100  }
1101 
1102 // Note the decrement of 2 in the following macros. With KMP_LIBRARY=turnaround,
1103 // there should be no yielding since the initial value from KMP_INIT_YIELD() is odd.
1104 
1105 #define KMP_YIELD_WHEN(cond, count) \
1106  { \
1107  KMP_CPU_PAUSE(); \
1108  (count) -= 2; \
1109  if (!(count)) { \
1110  __kmp_yield(cond); \
1111  (count) = __kmp_yield_next; \
1112  } \
1113  }
1114 #define KMP_YIELD_SPIN(count) \
1115  { \
1116  KMP_CPU_PAUSE(); \
1117  (count) -= 2; \
1118  if (!(count)) { \
1119  __kmp_yield(1); \
1120  (count) = __kmp_yield_next; \
1121  } \
1122  }
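/* Illustrative sketch (not part of the runtime): a typical spin-wait loop using
   the macros above; 'flag_is_set' stands in for whatever condition is polled.
     kmp_uint32 spins;
     KMP_INIT_YIELD(spins);
     while (!flag_is_set) {
       KMP_YIELD_SPIN(spins);
     }
*/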
1123 
1124 /* ------------------------------------------------------------------------ */
1125 /* Support datatypes for the orphaned construct nesting checks. */
1126 /* ------------------------------------------------------------------------ */
1127 
1128 enum cons_type {
1129  ct_none,
1130  ct_parallel,
1131  ct_pdo,
1132  ct_pdo_ordered,
1133  ct_psections,
1134  ct_psingle,
1135 
1136  /* the following must be left in order and not split up */
1137  ct_taskq,
1138  ct_task, // really task inside non-ordered taskq, considered worksharing type
1139  ct_task_ordered, /* really task inside ordered taskq, considered a worksharing
1140  type */
1141  /* the preceding must be left in order and not split up */
1142 
1143  ct_critical,
1144  ct_ordered_in_parallel,
1145  ct_ordered_in_pdo,
1146  ct_ordered_in_taskq,
1147  ct_master,
1148  ct_reduce,
1149  ct_barrier
1150 };
1151 
1152 /* test to see if we are in a taskq construct */
1153 #define IS_CONS_TYPE_TASKQ(ct) \
1154  (((int)(ct)) >= ((int)ct_taskq) && ((int)(ct)) <= ((int)ct_task_ordered))
1155 #define IS_CONS_TYPE_ORDERED(ct) \
1156  ((ct) == ct_pdo_ordered || (ct) == ct_task_ordered)
1157 
1158 struct cons_data {
1159  ident_t const *ident;
1160  enum cons_type type;
1161  int prev;
1162  kmp_user_lock_p
1163  name; /* address exclusively for critical section name comparison */
1164 };
1165 
1166 struct cons_header {
1167  int p_top, w_top, s_top;
1168  int stack_size, stack_top;
1169  struct cons_data *stack_data;
1170 };
1171 
1172 struct kmp_region_info {
1173  char *text;
1174  int offset[KMP_MAX_FIELDS];
1175  int length[KMP_MAX_FIELDS];
1176 };
1177 
1178 /* ---------------------------------------------------------------------- */
1179 /* ---------------------------------------------------------------------- */
1180 
1181 #if KMP_OS_WINDOWS
1182 typedef HANDLE kmp_thread_t;
1183 typedef DWORD kmp_key_t;
1184 #endif /* KMP_OS_WINDOWS */
1185 
1186 #if KMP_OS_UNIX
1187 typedef pthread_t kmp_thread_t;
1188 typedef pthread_key_t kmp_key_t;
1189 #endif
1190 
1191 extern kmp_key_t __kmp_gtid_threadprivate_key;
1192 
1193 typedef struct kmp_sys_info {
1194  long maxrss; /* the maximum resident set size utilized (in kilobytes) */
1195  long minflt; /* the number of page faults serviced without any I/O */
1196  long majflt; /* the number of page faults serviced that required I/O */
1197  long nswap; /* the number of times a process was "swapped" out of memory */
1198  long inblock; /* the number of times the file system had to perform input */
1199  long oublock; /* the number of times the file system had to perform output */
1200  long nvcsw; /* the number of times a context switch was performed voluntarily */
1201  long nivcsw; /* the number of times a context switch was forced */
1202 } kmp_sys_info_t;
1203 
1204 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
1205 typedef struct kmp_cpuinfo {
1206  int initialized; // If 0, other fields are not initialized.
1207  int signature; // CPUID(1).EAX
1208  int family; // CPUID(1).EAX[27:20]+CPUID(1).EAX[11:8] (Extended Family+Family)
1209  int model; // ( CPUID(1).EAX[19:16] << 4 ) + CPUID(1).EAX[7:4] ( ( Extended
1210  // Model << 4 ) + Model)
1211  int stepping; // CPUID(1).EAX[3:0] ( Stepping )
1212  int sse2; // 0 if SSE2 instructions are not supported, 1 otherwise.
1213  int rtm; // 0 if RTM instructions are not supported, 1 otherwise.
1214  int cpu_stackoffset;
1215  int apic_id;
1216  int physical_id;
1217  int logical_id;
1218  kmp_uint64 frequency; // Nominal CPU frequency in Hz.
1219  char name[3 * sizeof(kmp_cpuid_t)]; // CPUID(0x80000002,0x80000003,0x80000004)
1220 } kmp_cpuinfo_t;
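/* Illustrative note (not part of the runtime): decoding a hypothetical
   CPUID(1).EAX signature of 0x000306C3 with the bit fields described above:
     stepping = EAX[3:0]                       = 0x3
     family   = EAX[27:20] + EAX[11:8]         = 0x00 + 0x6       = 6
     model    = (EAX[19:16] << 4) + EAX[7:4]   = (0x3 << 4) + 0xC = 0x3C (60)
*/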
1221 #endif
1222 
1223 #if USE_ITT_BUILD
1224 // We cannot include "kmp_itt.h" due to circular dependency. Declare the only
1225 // required type here. Later we will check the type meets requirements.
1226 typedef int kmp_itt_mark_t;
1227 #define KMP_ITT_DEBUG 0
1228 #endif /* USE_ITT_BUILD */
1229 
1230 /* Taskq data structures */
1231 
1232 #define HIGH_WATER_MARK(nslots) (((nslots)*3) / 4)
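/* Illustrative note (not part of the runtime): HIGH_WATER_MARK(16) == 12, i.e.
   the high-water mark is reached at 3/4 of the queue slots. */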
1233 // num thunks that each thread can simultaneously execute from a task queue
1234 #define __KMP_TASKQ_THUNKS_PER_TH 1
1235 
1236 /* flags for taskq_global_flags, kmp_task_queue_t tq_flags, kmpc_thunk_t
1237  th_flags */
1238 
1239 #define TQF_IS_ORDERED 0x0001 // __kmpc_taskq interface, taskq ordered
1240 // __kmpc_taskq interface, taskq with lastprivate list
1241 #define TQF_IS_LASTPRIVATE 0x0002
1242 #define TQF_IS_NOWAIT 0x0004 // __kmpc_taskq interface, end taskq nowait
1243 // __kmpc_taskq interface, use heuristics to decide task queue size
1244 #define TQF_HEURISTICS 0x0008
1245 
1246 // __kmpc_taskq interface, reserved for future use
1247 #define TQF_INTERFACE_RESERVED1 0x0010
1248 // __kmpc_taskq interface, reserved for future use
1249 #define TQF_INTERFACE_RESERVED2 0x0020
1250 // __kmpc_taskq interface, reserved for future use
1251 #define TQF_INTERFACE_RESERVED3 0x0040
1252 // __kmpc_taskq interface, reserved for future use
1253 #define TQF_INTERFACE_RESERVED4 0x0080
1254 
1255 #define TQF_INTERFACE_FLAGS 0x00ff // all the __kmpc_taskq interface flags
1256 // internal/read by instrumentation; only used with TQF_IS_LASTPRIVATE
1257 #define TQF_IS_LAST_TASK 0x0100
1258 // internal use only; this thunk->th_task is the taskq_task
1259 #define TQF_TASKQ_TASK 0x0200
1260 // internal use only; must release worker threads once ANY queued task
1261 // exists (global)
1262 #define TQF_RELEASE_WORKERS 0x0400
1263 // internal use only; notify workers that master has finished enqueuing tasks
1264 #define TQF_ALL_TASKS_QUEUED 0x0800
1265 // internal use only: this queue encountered in parallel context: not serialized
1266 #define TQF_PARALLEL_CONTEXT 0x1000
1267 // internal use only; this queue is on the freelist and not in use
1268 #define TQF_DEALLOCATED 0x2000
1269 
1270 #define TQF_INTERNAL_FLAGS 0x3f00 // all the internal use only flags
1271 
1272 typedef struct KMP_ALIGN_CACHE kmpc_aligned_int32_t {
1273  kmp_int32 ai_data;
1274 } kmpc_aligned_int32_t;
1275 
1276 typedef struct KMP_ALIGN_CACHE kmpc_aligned_queue_slot_t {
1277  struct kmpc_thunk_t *qs_thunk;
1278 } kmpc_aligned_queue_slot_t;
1279 
1280 typedef struct kmpc_task_queue_t {
1281  /* task queue linkage fields for n-ary tree of queues (locked with global
1282  taskq_tree_lck) */
1283  kmp_lock_t tq_link_lck; /* lock for child link, child next/prev links and
1284  child ref counts */
1285  union {
1286  struct kmpc_task_queue_t *tq_parent; // pointer to parent taskq, not locked
1287  // for taskq internal freelists, locked with global taskq_freelist_lck
1288  struct kmpc_task_queue_t *tq_next_free;
1289  } tq;
1290  // pointer to linked-list of children, locked by tq's tq_link_lck
1291  volatile struct kmpc_task_queue_t *tq_first_child;
1292  // next child in linked-list, locked by parent tq's tq_link_lck
1293  struct kmpc_task_queue_t *tq_next_child;
1294  // previous child in linked-list, locked by parent tq's tq_link_lck
1295  struct kmpc_task_queue_t *tq_prev_child;
1296  // reference count of threads with access to this task queue
1297  volatile kmp_int32 tq_ref_count;
1298  /* (other than the thread executing the kmpc_end_taskq call) */
1299  /* locked by parent tq's tq_link_lck */
1300 
1301  /* shared data for task queue */
1302  /* per-thread array of pointers to shared variable structures */
1303  struct kmpc_aligned_shared_vars_t *tq_shareds;
1304  /* only one array element exists for all but outermost taskq */
1305 
1306  /* bookkeeping for ordered task queue */
1307  kmp_uint32 tq_tasknum_queuing; // ordered task # assigned while queuing tasks
1308  // ordered number of next task to be served (executed)
1309  volatile kmp_uint32 tq_tasknum_serving;
1310 
1311  /* thunk storage management for task queue */
1312  kmp_lock_t tq_free_thunks_lck; /* lock for thunk freelist manipulation */
1313  // thunk freelist, chained via th.th_next_free
1314  struct kmpc_thunk_t *tq_free_thunks;
1315  // space allocated for thunks for this task queue
1316  struct kmpc_thunk_t *tq_thunk_space;
1317 
1318  /* data fields for queue itself */
1319  kmp_lock_t tq_queue_lck; /* lock for [de]enqueue operations: tq_queue,
1320  tq_head, tq_tail, tq_nfull */
1321  /* array of queue slots to hold thunks for tasks */
1322  kmpc_aligned_queue_slot_t *tq_queue;
1323  volatile struct kmpc_thunk_t *tq_taskq_slot; /* special slot for taskq task
1324  thunk, occupied if not NULL */
1325  kmp_int32 tq_nslots; /* # of tq_thunk_space thunks alloc'd (not incl.
1326  tq_taskq_slot space) */
1327  kmp_int32 tq_head; // enqueue puts item here (index into tq_queue array)
1328  kmp_int32 tq_tail; // dequeue takes item from here (index into tq_queue array)
1329  volatile kmp_int32 tq_nfull; // # of occupied entries in task queue right now
1330  kmp_int32 tq_hiwat; /* high-water mark for tq_nfull and queue scheduling */
1331  volatile kmp_int32 tq_flags; /* TQF_xxx */
1332 
1333  /* bookkeeping for outstanding thunks */
1334 
1335  /* per-thread array for # of regular thunks currently being executed */
1336  struct kmpc_aligned_int32_t *tq_th_thunks;
1337  kmp_int32 tq_nproc; /* number of thunks in the th_thunks array */
1338 
1339  /* statistics library bookkeeping */
1340  ident_t *tq_loc; /* source location information for taskq directive */
1341 } kmpc_task_queue_t;
1342 
1343 typedef void (*kmpc_task_t)(kmp_int32 global_tid, struct kmpc_thunk_t *thunk);
1344 
1345 /* sizeof_shareds passed as arg to __kmpc_taskq call */
1346 typedef struct kmpc_shared_vars_t { /* aligned during dynamic allocation */
1347  kmpc_task_queue_t *sv_queue; /* (pointers to) shared vars */
1348 } kmpc_shared_vars_t;
1349 
1350 typedef struct KMP_ALIGN_CACHE kmpc_aligned_shared_vars_t {
1351  volatile struct kmpc_shared_vars_t *ai_data;
1352 } kmpc_aligned_shared_vars_t;
1353 
1354 /* sizeof_thunk passed as arg to kmpc_taskq call */
1355 typedef struct kmpc_thunk_t { /* aligned during dynamic allocation */
1356  union { /* field used for internal freelists too */
1357  kmpc_shared_vars_t *th_shareds;
1358  struct kmpc_thunk_t *th_next_free; /* freelist of individual thunks within
1359  queue, head at tq_free_thunks */
1360  } th;
1361  kmpc_task_t th_task; /* taskq_task if flags & TQF_TASKQ_TASK */
1362  struct kmpc_thunk_t *th_encl_thunk; /* pointer to dynamically enclosing thunk
1363  on this thread's call stack */
1364  // TQF_xxx(tq_flags interface plus possible internal flags)
1365  kmp_int32 th_flags;
1366 
1367  kmp_int32 th_status;
1368  kmp_uint32 th_tasknum; /* task number assigned in order of queuing, used for
1369  ordered sections */
1370  /* private vars */
1371 } kmpc_thunk_t;
1372 
1373 typedef struct KMP_ALIGN_CACHE kmp_taskq {
1374  int tq_curr_thunk_capacity;
1375 
1376  kmpc_task_queue_t *tq_root;
1377  kmp_int32 tq_global_flags;
1378 
1379  kmp_lock_t tq_freelist_lck;
1380  kmpc_task_queue_t *tq_freelist;
1381 
1382  kmpc_thunk_t **tq_curr_thunk;
1383 } kmp_taskq_t;
1384 
1385 /* END Taskq data structures */
1386 
1387 typedef kmp_int32 kmp_critical_name[8];
1388 
1398 typedef void (*kmpc_micro)(kmp_int32 *global_tid, kmp_int32 *bound_tid, ...);
1399 typedef void (*kmpc_micro_bound)(kmp_int32 *bound_tid, kmp_int32 *bound_nth,
1400  ...);
1401 
1406 /* ---------------------------------------------------------------------------
1407  */
1408 /* Threadprivate initialization/finalization function declarations */
1409 
1410 /* for non-array objects: __kmpc_threadprivate_register() */
1411 
1416 typedef void *(*kmpc_ctor)(void *);
1417 
1422 typedef void (*kmpc_dtor)(
1423  void * /*, size_t */); /* 2nd arg: magic number for KCC unused by Intel
1424  compiler */
1429 typedef void *(*kmpc_cctor)(void *, void *);
1430 
1431 /* for array objects: __kmpc_threadprivate_register_vec() */
1432 /* First arg: "this" pointer */
1433 /* Last arg: number of array elements */
1439 typedef void *(*kmpc_ctor_vec)(void *, size_t);
1445 typedef void (*kmpc_dtor_vec)(void *, size_t);
1451 typedef void *(*kmpc_cctor_vec)(void *, void *,
1452  size_t); /* function unused by compiler */
1453 
1458 /* keeps track of threadprivate cache allocations for cleanup later */
1459 typedef struct kmp_cached_addr {
1460  void **addr; /* address of allocated cache */
1461  void ***compiler_cache; /* pointer to compiler's cache */
1462  void *data; /* pointer to global data */
1463  struct kmp_cached_addr *next; /* pointer to next cached address */
1464 } kmp_cached_addr_t;
1465 
1466 struct private_data {
1467  struct private_data *next; /* The next descriptor in the list */
1468  void *data; /* The data buffer for this descriptor */
1469  int more; /* The repeat count for this descriptor */
1470  size_t size; /* The data size for this descriptor */
1471 };
1472 
1473 struct private_common {
1474  struct private_common *next;
1475  struct private_common *link;
1476  void *gbl_addr;
1477  void *par_addr; /* par_addr == gbl_addr for MASTER thread */
1478  size_t cmn_size;
1479 };
1480 
1481 struct shared_common {
1482  struct shared_common *next;
1483  struct private_data *pod_init;
1484  void *obj_init;
1485  void *gbl_addr;
1486  union {
1487  kmpc_ctor ctor;
1488  kmpc_ctor_vec ctorv;
1489  } ct;
1490  union {
1491  kmpc_cctor cctor;
1492  kmpc_cctor_vec cctorv;
1493  } cct;
1494  union {
1495  kmpc_dtor dtor;
1496  kmpc_dtor_vec dtorv;
1497  } dt;
1498  size_t vec_len;
1499  int is_vec;
1500  size_t cmn_size;
1501 };
1502 
1503 #define KMP_HASH_TABLE_LOG2 9 /* log2 of the hash table size */
1504 #define KMP_HASH_TABLE_SIZE \
1505  (1 << KMP_HASH_TABLE_LOG2) /* size of the hash table */
1506 #define KMP_HASH_SHIFT 3 /* throw away this many low bits from the address */
1507 #define KMP_HASH(x) \
1508  ((((kmp_uintptr_t)x) >> KMP_HASH_SHIFT) & (KMP_HASH_TABLE_SIZE - 1))
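/* Illustrative note (not part of the runtime): with a 512-entry table, a
   hypothetical global at address 0x1008 hashes to
     (0x1008 >> 3) & 511 == 0x201 & 0x1FF == 1 */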
1509 
1510 struct common_table {
1511  struct private_common *data[KMP_HASH_TABLE_SIZE];
1512 };
1513 
1514 struct shared_table {
1515  struct shared_common *data[KMP_HASH_TABLE_SIZE];
1516 };
1517 
1518 /* ------------------------------------------------------------------------ */
1519 
1520 #if KMP_USE_HIER_SCHED
1521 // Shared barrier data that exists inside a single unit of the scheduling
1522 // hierarchy
1523 typedef struct kmp_hier_private_bdata_t {
1524  kmp_int32 num_active;
1525  kmp_uint64 index;
1526  kmp_uint64 wait_val[2];
1527 } kmp_hier_private_bdata_t;
1528 #endif
1529 
1530 typedef struct kmp_sched_flags {
1531  unsigned ordered : 1;
1532  unsigned nomerge : 1;
1533  unsigned contains_last : 1;
1534 #if KMP_USE_HIER_SCHED
1535  unsigned use_hier : 1;
1536  unsigned unused : 28;
1537 #else
1538  unsigned unused : 29;
1539 #endif
1540 } kmp_sched_flags_t;
1541 
1542 KMP_BUILD_ASSERT(sizeof(kmp_sched_flags_t) == 4);
1543 
1544 #if KMP_STATIC_STEAL_ENABLED
1545 typedef struct KMP_ALIGN_CACHE dispatch_private_info32 {
1546  kmp_int32 count;
1547  kmp_int32 ub;
1548  /* Adding KMP_ALIGN_CACHE here doesn't help / can hurt performance */
1549  kmp_int32 lb;
1550  kmp_int32 st;
1551  kmp_int32 tc;
1552  kmp_int32 static_steal_counter; /* for static_steal only; maybe better to put
1553  after ub */
1554 
1555  // KMP_ALIGN( 16 ) ensures ( if the KMP_ALIGN macro is turned on )
1556  // a) parm3 is properly aligned and
1557  // b) all parm1-4 are in the same cache line.
1558  // Because parm1-4 are used together, performance seems to be better
1559  // if they are in the same line (not measured though).
1560 
1561  struct KMP_ALIGN(32) { // AC: changed 16 to 32 in order to simplify template
1562  kmp_int32 parm1; // structures in kmp_dispatch.cpp. This should
1563  kmp_int32 parm2; // make no real change at least while padding is off.
1564  kmp_int32 parm3;
1565  kmp_int32 parm4;
1566  };
1567 
1568  kmp_uint32 ordered_lower;
1569  kmp_uint32 ordered_upper;
1570 #if KMP_OS_WINDOWS
1571  // This var can be placed in the hole between 'tc' and 'parm1', instead of
1572  // 'static_steal_counter'. It would be nice to measure execution times.
1573  // The conditional if/endif could be removed altogether.
1574  kmp_int32 last_upper;
1575 #endif /* KMP_OS_WINDOWS */
1576 } dispatch_private_info32_t;
1577 
1578 typedef struct KMP_ALIGN_CACHE dispatch_private_info64 {
1579  kmp_int64 count; // current chunk number for static & static-steal scheduling
1580  kmp_int64 ub; /* upper-bound */
1581  /* Adding KMP_ALIGN_CACHE here doesn't help / can hurt performance */
1582  kmp_int64 lb; /* lower-bound */
1583  kmp_int64 st; /* stride */
1584  kmp_int64 tc; /* trip count (number of iterations) */
1585  kmp_int64 static_steal_counter; /* for static_steal only; maybe better to put
1586  after ub */
1587 
1588  /* parm[1-4] are used in different ways by different scheduling algorithms */
1589 
1590  // KMP_ALIGN( 32 ) ensures ( if the KMP_ALIGN macro is turned on )
1591  // a) parm3 is properly aligned and
1592  // b) all parm1-4 are in the same cache line.
1593  // Because parm1-4 are used together, performance seems to be better
1594  // if they are in the same line (not measured though).
1595 
1596  struct KMP_ALIGN(32) {
1597  kmp_int64 parm1;
1598  kmp_int64 parm2;
1599  kmp_int64 parm3;
1600  kmp_int64 parm4;
1601  };
1602 
1603  kmp_uint64 ordered_lower;
1604  kmp_uint64 ordered_upper;
1605 #if KMP_OS_WINDOWS
1606  // This var can be placed in the hole between 'tc' and 'parm1', instead of
1607  // 'static_steal_counter'. It would be nice to measure execution times.
1608  // The conditional if/endif could be removed altogether.
1609  kmp_int64 last_upper;
1610 #endif /* KMP_OS_WINDOWS */
1611 } dispatch_private_info64_t;
1612 #else /* KMP_STATIC_STEAL_ENABLED */
1613 typedef struct KMP_ALIGN_CACHE dispatch_private_info32 {
1614  kmp_int32 lb;
1615  kmp_int32 ub;
1616  kmp_int32 st;
1617  kmp_int32 tc;
1618 
1619  kmp_int32 parm1;
1620  kmp_int32 parm2;
1621  kmp_int32 parm3;
1622  kmp_int32 parm4;
1623 
1624  kmp_int32 count;
1625 
1626  kmp_uint32 ordered_lower;
1627  kmp_uint32 ordered_upper;
1628 #if KMP_OS_WINDOWS
1629  kmp_int32 last_upper;
1630 #endif /* KMP_OS_WINDOWS */
1631 } dispatch_private_info32_t;
1632 
1633 typedef struct KMP_ALIGN_CACHE dispatch_private_info64 {
1634  kmp_int64 lb; /* lower-bound */
1635  kmp_int64 ub; /* upper-bound */
1636  kmp_int64 st; /* stride */
1637  kmp_int64 tc; /* trip count (number of iterations) */
1638 
1639  /* parm[1-4] are used in different ways by different scheduling algorithms */
1640  kmp_int64 parm1;
1641  kmp_int64 parm2;
1642  kmp_int64 parm3;
1643  kmp_int64 parm4;
1644 
1645  kmp_int64 count; /* current chunk number for static scheduling */
1646 
1647  kmp_uint64 ordered_lower;
1648  kmp_uint64 ordered_upper;
1649 #if KMP_OS_WINDOWS
1650  kmp_int64 last_upper;
1651 #endif /* KMP_OS_WINDOWS */
1652 } dispatch_private_info64_t;
1653 #endif /* KMP_STATIC_STEAL_ENABLED */
1654 
1655 typedef struct KMP_ALIGN_CACHE dispatch_private_info {
1656  union private_info {
1657  dispatch_private_info32_t p32;
1658  dispatch_private_info64_t p64;
1659  } u;
1660  enum sched_type schedule; /* scheduling algorithm */
1661  kmp_sched_flags_t flags; /* flags (e.g., ordered, nomerge, etc.) */
1662  kmp_int32 ordered_bumped;
1663  // To retain the structure size after making ordered_iteration scalar
1664  kmp_int32 ordered_dummy[KMP_MAX_ORDERED - 3];
1665  // Stack of buffers for nest of serial regions
1666  struct dispatch_private_info *next;
1667  kmp_int32 type_size; /* the size of types in private_info */
1668 #if KMP_USE_HIER_SCHED
1669  kmp_int32 hier_id;
1670  void *parent; /* hierarchical scheduling parent pointer */
1671 #endif
1672  enum cons_type pushed_ws;
1673 } dispatch_private_info_t;
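
// Illustrative sketch (not part of kmp.h): the dispatch code records the size
// of the loop's iteration type in 'type_size' and then reads the matching view
// of the union; the helper below is hypothetical and shows only the selection.
#if 0 // example only
static inline kmp_uint64 example_get_trip_count(dispatch_private_info_t *pr) {
  return (pr->type_size == 4) ? (kmp_uint64)pr->u.p32.tc  // 32-bit view
                              : (kmp_uint64)pr->u.p64.tc; // 64-bit view
}
#endif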
1674 
1675 typedef struct dispatch_shared_info32 {
1676  /* chunk index under dynamic, number of idle threads under static-steal;
1677  iteration index otherwise */
1678  volatile kmp_uint32 iteration;
1679  volatile kmp_uint32 num_done;
1680  volatile kmp_uint32 ordered_iteration;
1681  // Dummy to retain the structure size after making ordered_iteration scalar
1682  kmp_int32 ordered_dummy[KMP_MAX_ORDERED - 1];
1683 } dispatch_shared_info32_t;
1684 
1685 typedef struct dispatch_shared_info64 {
1686  /* chunk index under dynamic, number of idle threads under static-steal;
1687  iteration index otherwise */
1688  volatile kmp_uint64 iteration;
1689  volatile kmp_uint64 num_done;
1690  volatile kmp_uint64 ordered_iteration;
1691  // Dummy to retain the structure size after making ordered_iteration scalar
1692  kmp_int64 ordered_dummy[KMP_MAX_ORDERED - 3];
1693 } dispatch_shared_info64_t;
1694 
1695 typedef struct dispatch_shared_info {
1696  union shared_info {
1697  dispatch_shared_info32_t s32;
1698  dispatch_shared_info64_t s64;
1699  } u;
1700  volatile kmp_uint32 buffer_index;
1701 #if OMP_45_ENABLED
1702  volatile kmp_int32 doacross_buf_idx; // teamwise index
1703  volatile kmp_uint32 *doacross_flags; // shared array of iteration flags (0/1)
1704  kmp_int32 doacross_num_done; // count finished threads
1705 #endif
1706 #if KMP_USE_HIER_SCHED
1707  void *hier;
1708 #endif
1709 #if KMP_USE_HWLOC
1710  // When linking with libhwloc, the ORDERED EPCC test slows down on big
1711  // machines (> 48 cores). Performance analysis showed that a cache thrash
1712  // was occurring and this padding helps alleviate the problem.
1713  char padding[64];
1714 #endif
1715 } dispatch_shared_info_t;
1716 
1717 typedef struct kmp_disp {
1718  /* Vector for ORDERED SECTION */
1719  void (*th_deo_fcn)(int *gtid, int *cid, ident_t *);
1720  /* Vector for END ORDERED SECTION */
1721  void (*th_dxo_fcn)(int *gtid, int *cid, ident_t *);
1722 
1723  dispatch_shared_info_t *th_dispatch_sh_current;
1724  dispatch_private_info_t *th_dispatch_pr_current;
1725 
1726  dispatch_private_info_t *th_disp_buffer;
1727  kmp_int32 th_disp_index;
1728 #if OMP_45_ENABLED
1729  kmp_int32 th_doacross_buf_idx; // thread's doacross buffer index
1730  volatile kmp_uint32 *th_doacross_flags; // pointer to shared array of flags
1731  union { // we can use union here because doacross cannot be used in
1732  // nonmonotonic loops
1733  kmp_int64 *th_doacross_info; // info on loop bounds
1734  kmp_lock_t *th_steal_lock; // lock used for chunk stealing (8-byte variable)
1735  };
1736 #else
1737 #if KMP_STATIC_STEAL_ENABLED
1738  kmp_lock_t *th_steal_lock; // lock used for chunk stealing (8-byte variable)
1739  void *dummy_padding[1]; // make it 64 bytes on Intel(R) 64
1740 #else
1741  void *dummy_padding[2]; // make it 64 bytes on Intel(R) 64
1742 #endif
1743 #endif
1744 #if KMP_USE_INTERNODE_ALIGNMENT
1745  char more_padding[INTERNODE_CACHE_LINE];
1746 #endif
1747 } kmp_disp_t;
1748 
1749 /* ------------------------------------------------------------------------ */
1750 /* Barrier stuff */
1751 
1752 /* constants for barrier state update */
1753 #define KMP_INIT_BARRIER_STATE 0 /* should probably start from zero */
1754 #define KMP_BARRIER_SLEEP_BIT 0 /* bit used for suspend/sleep part of state */
1755 #define KMP_BARRIER_UNUSED_BIT 1 // bit that must never be set for valid state
1756 #define KMP_BARRIER_BUMP_BIT 2 /* lsb used for bump of go/arrived state */
1757 
1758 #define KMP_BARRIER_SLEEP_STATE (1 << KMP_BARRIER_SLEEP_BIT)
1759 #define KMP_BARRIER_UNUSED_STATE (1 << KMP_BARRIER_UNUSED_BIT)
1760 #define KMP_BARRIER_STATE_BUMP (1 << KMP_BARRIER_BUMP_BIT)
1761 
1762 #if (KMP_BARRIER_SLEEP_BIT >= KMP_BARRIER_BUMP_BIT)
1763 #error "Barrier sleep bit must be smaller than barrier bump bit"
1764 #endif
1765 #if (KMP_BARRIER_UNUSED_BIT >= KMP_BARRIER_BUMP_BIT)
1766 #error "Barrier unused bit must be smaller than barrier bump bit"
1767 #endif
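
// Illustrative sketch (not part of kmp.h): with the bit layout above, a
// barrier state advances in units of KMP_BARRIER_STATE_BUMP (1 << 2), so the
// two low bits stay free for the sleep flag and the never-set unused bit.
// The helper names below are hypothetical.
#if 0 // example only
static inline kmp_uint64 example_bump_barrier_state(kmp_uint64 s) {
  return s + KMP_BARRIER_STATE_BUMP; // next go/arrived epoch, low bits untouched
}
static inline int example_is_sleeping(kmp_uint64 s) {
  return (s & KMP_BARRIER_SLEEP_STATE) != 0; // worker parked in wait_sleep
}
#endif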
1768 
1769 // Constants for release barrier wait state: currently, hierarchical only
1770 #define KMP_BARRIER_NOT_WAITING 0 // Normal state; worker not in wait_sleep
1771 #define KMP_BARRIER_OWN_FLAG \
1772  1 // Normal state; worker waiting on own b_go flag in release
1773 #define KMP_BARRIER_PARENT_FLAG \
1774  2 // Special state; worker waiting on parent's b_go flag in release
1775 #define KMP_BARRIER_SWITCH_TO_OWN_FLAG \
1776  3 // Special state; tells worker to shift from parent to own b_go
1777 #define KMP_BARRIER_SWITCHING \
1778  4 // Special state; worker resets appropriate flag on wake-up
1779 
1780 #define KMP_NOT_SAFE_TO_REAP \
1781  0 // Thread th_reap_state: not safe to reap (tasking)
1782 #define KMP_SAFE_TO_REAP 1 // Thread th_reap_state: safe to reap (not tasking)
1783 
1784 enum barrier_type {
1785  bs_plain_barrier = 0, /* 0, All non-fork/join barriers (except reduction
1786  barriers if enabled) */
1787  bs_forkjoin_barrier, /* 1, All fork/join (parallel region) barriers */
1788 #if KMP_FAST_REDUCTION_BARRIER
1789  bs_reduction_barrier, /* 2, All barriers that are used in reduction */
1790 #endif // KMP_FAST_REDUCTION_BARRIER
1791  bs_last_barrier /* Just a placeholder to mark the end */
1792 };
1793 
1794 // to work with reduction barriers just like with plain barriers
1795 #if !KMP_FAST_REDUCTION_BARRIER
1796 #define bs_reduction_barrier bs_plain_barrier
1797 #endif // KMP_FAST_REDUCTION_BARRIER
1798 
1799 typedef enum kmp_bar_pat { /* Barrier communication patterns */
1800  bp_linear_bar =
1801  0, /* Single level (degenerate) tree */
1802  bp_tree_bar =
1803  1, /* Balanced tree with branching factor 2^n */
1804  bp_hyper_bar =
1805  2, /* Hypercube-embedded tree with min branching
1806  factor 2^n */
1807  bp_hierarchical_bar = 3, /* Machine hierarchy tree */
1808  bp_last_bar /* Placeholder to mark the end */
1809 } kmp_bar_pat_e;
1810 
1811 #define KMP_BARRIER_ICV_PUSH 1
1812 
1813 /* Record for holding the values of the internal controls stack records */
1814 typedef struct kmp_internal_control {
1815  int serial_nesting_level; /* corresponds to the value of the
1816  th_team_serialized field */
1817  kmp_int8 nested; /* internal control for nested parallelism (per thread) */
1818  kmp_int8 dynamic; /* internal control for dynamic adjustment of threads (per
1819  thread) */
1820  kmp_int8
1821  bt_set; /* internal control for whether blocktime is explicitly set */
1822  int blocktime; /* internal control for blocktime */
1823 #if KMP_USE_MONITOR
1824  int bt_intervals; /* internal control for blocktime intervals */
1825 #endif
1826  int nproc; /* internal control for #threads for next parallel region (per
1827  thread) */
1828  int max_active_levels; /* internal control for max_active_levels */
1829  kmp_r_sched_t
1830  sched; /* internal control for runtime schedule {sched,chunk} pair */
1831 #if OMP_40_ENABLED
1832  kmp_proc_bind_t proc_bind; /* internal control for affinity */
1833  kmp_int32 default_device; /* internal control for default device */
1834 #endif // OMP_40_ENABLED
1835  struct kmp_internal_control *next;
1836 } kmp_internal_control_t;
1837 
1838 static inline void copy_icvs(kmp_internal_control_t *dst,
1839  kmp_internal_control_t *src) {
1840  *dst = *src;
1841 }
1842 
1843 /* Thread barrier needs volatile barrier fields */
1844 typedef struct KMP_ALIGN_CACHE kmp_bstate {
1845  // th_fixed_icvs is aligned by virtue of kmp_bstate being aligned (and all
1846  // uses of it). It is not explicitly aligned below, because we *don't* want
1847  // it to be padded -- instead, we fit b_go into the same cache line with
1848  // th_fixed_icvs, enabling NGO cache-line stores in the hierarchical barrier.
1849  kmp_internal_control_t th_fixed_icvs; // Initial ICVs for the thread
1850  // Tuck b_go into end of th_fixed_icvs cache line, so it can be stored with
1851  // same NGO store
1852  volatile kmp_uint64 b_go; // STATE => task should proceed (hierarchical)
1853  KMP_ALIGN_CACHE volatile kmp_uint64
1854  b_arrived; // STATE => task reached synch point.
1855  kmp_uint32 *skip_per_level;
1856  kmp_uint32 my_level;
1857  kmp_int32 parent_tid;
1858  kmp_int32 old_tid;
1859  kmp_uint32 depth;
1860  struct kmp_bstate *parent_bar;
1861  kmp_team_t *team;
1862  kmp_uint64 leaf_state;
1863  kmp_uint32 nproc;
1864  kmp_uint8 base_leaf_kids;
1865  kmp_uint8 leaf_kids;
1866  kmp_uint8 offset;
1867  kmp_uint8 wait_flag;
1868  kmp_uint8 use_oncore_barrier;
1869 #if USE_DEBUGGER
1870  // The following field is intended for the debugger solely. Only the worker
1871  // thread itself accesses this field: the worker increases it by 1 when it
1872  // arrives to a barrier.
1873  KMP_ALIGN_CACHE kmp_uint b_worker_arrived;
1874 #endif /* USE_DEBUGGER */
1875 } kmp_bstate_t;
1876 
1877 union KMP_ALIGN_CACHE kmp_barrier_union {
1878  double b_align; /* use worst case alignment */
1879  char b_pad[KMP_PAD(kmp_bstate_t, CACHE_LINE)];
1880  kmp_bstate_t bb;
1881 };
1882 
1883 typedef union kmp_barrier_union kmp_balign_t;
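
// Illustrative sketch (not part of kmp.h): KMP_PAD (defined earlier in this
// header) rounds sizeof(kmp_bstate_t) up to a CACHE_LINE multiple, so b_pad --
// and therefore the whole union -- occupies an integral number of cache lines.
// A build-time check of that property might look like this, assuming the usual
// power-of-two CACHE_LINE configuration.
#if 0 // example only
static_assert(sizeof(kmp_balign_t) % CACHE_LINE == 0,
              "kmp_balign_t should span whole cache lines");
#endif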
1884 
1885 /* Team barrier needs only non-volatile arrived counter */
1886 union KMP_ALIGN_CACHE kmp_barrier_team_union {
1887  double b_align; /* use worst case alignment */
1888  char b_pad[CACHE_LINE];
1889  struct {
1890  kmp_uint64 b_arrived; /* STATE => task reached synch point. */
1891 #if USE_DEBUGGER
1892  // The following two fields are intended solely for the debugger. Only the
1893  // master of the team accesses these fields: the first one is increased by
1894  // 1 when the master arrives at a barrier, the second one is increased by one
1895  // when all the threads have arrived.
1896  kmp_uint b_master_arrived;
1897  kmp_uint b_team_arrived;
1898 #endif
1899  };
1900 };
1901 
1902 typedef union kmp_barrier_team_union kmp_balign_team_t;
1903 
1904 /* Padding for Linux* OS pthreads condition variables and mutexes used to signal
1905  threads when a condition changes. This works around an NPTL bug where
1906  padding was added to pthread_cond_t, which caused the initialization routine
1907  to write outside of the structure if compiled against a pre-NPTL threads library. */
1908 #if KMP_OS_WINDOWS
1909 typedef struct kmp_win32_mutex {
1910  /* The Lock */
1911  CRITICAL_SECTION cs;
1912 } kmp_win32_mutex_t;
1913 
1914 typedef struct kmp_win32_cond {
1915  /* Count of the number of waiters. */
1916  int waiters_count_;
1917 
1918  /* Serialize access to <waiters_count_> */
1919  kmp_win32_mutex_t waiters_count_lock_;
1920 
1921  /* Number of threads to release via a <cond_broadcast> or a <cond_signal> */
1922  int release_count_;
1923 
1924  /* Keeps track of the current "generation" so that we don't allow */
1925  /* one thread to steal all the "releases" from the broadcast. */
1926  int wait_generation_count_;
1927 
1928  /* A manual-reset event that's used to block and release waiting threads. */
1929  HANDLE event_;
1930 } kmp_win32_cond_t;
1931 #endif
1932 
1933 #if KMP_OS_UNIX
1934 
1935 union KMP_ALIGN_CACHE kmp_cond_union {
1936  double c_align;
1937  char c_pad[CACHE_LINE];
1938  pthread_cond_t c_cond;
1939 };
1940 
1941 typedef union kmp_cond_union kmp_cond_align_t;
1942 
1943 union KMP_ALIGN_CACHE kmp_mutex_union {
1944  double m_align;
1945  char m_pad[CACHE_LINE];
1946  pthread_mutex_t m_mutex;
1947 };
1948 
1949 typedef union kmp_mutex_union kmp_mutex_align_t;
1950 
1951 #endif /* KMP_OS_UNIX */
1952 
1953 typedef struct kmp_desc_base {
1954  void *ds_stackbase;
1955  size_t ds_stacksize;
1956  int ds_stackgrow;
1957  kmp_thread_t ds_thread;
1958  volatile int ds_tid;
1959  int ds_gtid;
1960 #if KMP_OS_WINDOWS
1961  volatile int ds_alive;
1962  DWORD ds_thread_id;
1963 /* ds_thread keeps the thread handle on Windows* OS. It is enough for RTL
1964  purposes. However, debugger support (libomp_db) cannot work with handles,
1965  because they are not comparable. For example, the debugger requests info
1966  about a thread with handle h. h is valid within the debugger process but
1967  meaningless within the debuggee process. Even if h is duplicated via
1968  DuplicateHandle() so the result h' is valid within the debuggee process, h'
1969  is a *new* handle which does *not* equal any other handle in the debuggee...
1970  The only way to compare handles is to convert them to system-wide ids.
1971  GetThreadId() is available only in Longhorn and Server 2003. :-( In contrast,
1972  GetCurrentThreadId() is available on all Windows* OS flavours (including
1973  Windows* 95). Thus, we get the thread id by calling GetCurrentThreadId()
1974  from within the thread and save it to let libomp_db identify threads. */
1975 #endif /* KMP_OS_WINDOWS */
1976 } kmp_desc_base_t;
1977 
1978 typedef union KMP_ALIGN_CACHE kmp_desc {
1979  double ds_align; /* use worst case alignment */
1980  char ds_pad[KMP_PAD(kmp_desc_base_t, CACHE_LINE)];
1981  kmp_desc_base_t ds;
1982 } kmp_desc_t;
1983 
1984 typedef struct kmp_local {
1985  volatile int this_construct; /* count of single's encountered by thread */
1986  void *reduce_data;
1987 #if KMP_USE_BGET
1988  void *bget_data;
1989  void *bget_list;
1990 #if !USE_CMP_XCHG_FOR_BGET
1991 #ifdef USE_QUEUING_LOCK_FOR_BGET
1992  kmp_lock_t bget_lock; /* Lock for accessing bget free list */
1993 #else
1994  kmp_bootstrap_lock_t bget_lock; // Lock for accessing bget free list. Must be
1995 // bootstrap lock so we can use it at library
1996 // shutdown.
1997 #endif /* USE_QUEUING_LOCK_FOR_BGET */
1998 #endif /* ! USE_CMP_XCHG_FOR_BGET */
1999 #endif /* KMP_USE_BGET */
2000 
2001  PACKED_REDUCTION_METHOD_T
2002  packed_reduction_method; /* stored by __kmpc_reduce*(), used by
2003  __kmpc_end_reduce*() */
2004 
2005 } kmp_local_t;
2006 
2007 #define KMP_CHECK_UPDATE(a, b) \
2008  if ((a) != (b)) \
2009  (a) = (b)
2010 #define KMP_CHECK_UPDATE_SYNC(a, b) \
2011  if ((a) != (b)) \
2012  TCW_SYNC_PTR((a), (b))
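
// Illustrative sketch (not part of kmp.h): these macros store only when the
// value actually changes, so re-initializing a hot team with identical
// settings does not dirty cache lines that worker threads may be reading.
// 'team', 'new_nproc' and 'parent_team' below are hypothetical locals.
#if 0 // example only
KMP_CHECK_UPDATE(team->t.t_nproc, new_nproc);          // plain conditional store
KMP_CHECK_UPDATE_SYNC(team->t.t_parent, parent_team);  // conditional TCW_SYNC_PTR store
#endif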
2013 
2014 #define get__blocktime(xteam, xtid) \
2015  ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.blocktime)
2016 #define get__bt_set(xteam, xtid) \
2017  ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_set)
2018 #if KMP_USE_MONITOR
2019 #define get__bt_intervals(xteam, xtid) \
2020  ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_intervals)
2021 #endif
2022 
2023 #define get__nested_2(xteam, xtid) \
2024  ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.nested)
2025 #define get__dynamic_2(xteam, xtid) \
2026  ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.dynamic)
2027 #define get__nproc_2(xteam, xtid) \
2028  ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.nproc)
2029 #define get__sched_2(xteam, xtid) \
2030  ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.sched)
2031 
2032 #define set__blocktime_team(xteam, xtid, xval) \
2033  (((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.blocktime) = \
2034  (xval))
2035 
2036 #if KMP_USE_MONITOR
2037 #define set__bt_intervals_team(xteam, xtid, xval) \
2038  (((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_intervals) = \
2039  (xval))
2040 #endif
2041 
2042 #define set__bt_set_team(xteam, xtid, xval) \
2043  (((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_set) = (xval))
2044 
2045 #define set__nested(xthread, xval) \
2046  (((xthread)->th.th_current_task->td_icvs.nested) = (xval))
2047 #define get__nested(xthread) \
2048  (((xthread)->th.th_current_task->td_icvs.nested) ? (FTN_TRUE) : (FTN_FALSE))
2049 
2050 #define set__dynamic(xthread, xval) \
2051  (((xthread)->th.th_current_task->td_icvs.dynamic) = (xval))
2052 #define get__dynamic(xthread) \
2053  (((xthread)->th.th_current_task->td_icvs.dynamic) ? (FTN_TRUE) : (FTN_FALSE))
2054 
2055 #define set__nproc(xthread, xval) \
2056  (((xthread)->th.th_current_task->td_icvs.nproc) = (xval))
2057 
2058 #define set__max_active_levels(xthread, xval) \
2059  (((xthread)->th.th_current_task->td_icvs.max_active_levels) = (xval))
2060 
2061 #define set__sched(xthread, xval) \
2062  (((xthread)->th.th_current_task->td_icvs.sched) = (xval))
2063 
2064 #if OMP_40_ENABLED
2065 
2066 #define set__proc_bind(xthread, xval) \
2067  (((xthread)->th.th_current_task->td_icvs.proc_bind) = (xval))
2068 #define get__proc_bind(xthread) \
2069  ((xthread)->th.th_current_task->td_icvs.proc_bind)
2070 
2071 #endif /* OMP_40_ENABLED */
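
// Illustrative sketch (not part of kmp.h): the ICVs live in the thread's
// current implicit task, so all of the accessors above dereference
// thread -> th_current_task -> td_icvs. 'gtid' below is a hypothetical
// global thread id.
#if 0 // example only
kmp_info_t *thr = __kmp_threads[gtid];
set__dynamic(thr, 1); // enable dynamic adjustment for this thread
int team_nproc_icv =
    get__nproc_2(thr->th.th_team, __kmp_tid_from_gtid(gtid));
#endif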
2072 
2073 // OpenMP tasking data structures
2074 
2075 typedef enum kmp_tasking_mode {
2076  tskm_immediate_exec = 0,
2077  tskm_extra_barrier = 1,
2078  tskm_task_teams = 2,
2079  tskm_max = 2
2080 } kmp_tasking_mode_t;
2081 
2082 extern kmp_tasking_mode_t
2083  __kmp_tasking_mode; /* determines how/when to execute tasks */
2084 extern int __kmp_task_stealing_constraint;
2085 #if OMP_40_ENABLED
2086 extern kmp_int32 __kmp_default_device; // Set via OMP_DEFAULT_DEVICE if
2087 // specified, defaults to 0 otherwise
2088 #endif
2089 #if OMP_45_ENABLED
2090 // Set via OMP_MAX_TASK_PRIORITY if specified, defaults to 0 otherwise
2091 extern kmp_int32 __kmp_max_task_priority;
2092 // Set via KMP_TASKLOOP_MIN_TASKS if specified, defaults to 0 otherwise
2093 extern kmp_uint64 __kmp_taskloop_min_tasks;
2094 #endif
2095 
2096 /* NOTE: kmp_taskdata_t and kmp_task_t structures allocated in single block with
2097  taskdata first */
2098 #define KMP_TASK_TO_TASKDATA(task) (((kmp_taskdata_t *)task) - 1)
2099 #define KMP_TASKDATA_TO_TASK(taskdata) (kmp_task_t *)(taskdata + 1)
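
// Illustrative sketch (not part of kmp.h): a task is carved out of one
// allocation laid out as [ kmp_taskdata_t | kmp_task_t | privates ... ], so
// converting between the two views is plain pointer arithmetic. 'buf' below is
// a hypothetical block returned by the task allocator.
#if 0 // example only
kmp_taskdata_t *taskdata = (kmp_taskdata_t *)buf;
kmp_task_t *task = KMP_TASKDATA_TO_TASK(taskdata); // == (kmp_task_t *)(taskdata + 1)
KMP_DEBUG_ASSERT(KMP_TASK_TO_TASKDATA(task) == taskdata);
#endif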
2100 
2101 // The tt_found_tasks flag is a signal to all threads in the team that tasks
2102 // were spawned and queued since the previous barrier release.
2103 #define KMP_TASKING_ENABLED(task_team) \
2104  (TCR_SYNC_4((task_team)->tt.tt_found_tasks) == TRUE)
2105 
2112 typedef kmp_int32 (*kmp_routine_entry_t)(kmp_int32, void *);
2113 
2114 #if OMP_40_ENABLED || OMP_45_ENABLED
2115 typedef union kmp_cmplrdata {
2116 #if OMP_45_ENABLED
2117  kmp_int32 priority;
2118 #endif // OMP_45_ENABLED
2119 #if OMP_40_ENABLED
2120  kmp_routine_entry_t
2121  destructors; /* pointer to function to invoke destructors of
2122  firstprivate C++ objects */
2123 #endif // OMP_40_ENABLED
2124  /* future data */
2125 } kmp_cmplrdata_t;
2126 #endif
2127 
2128 /* sizeof_kmp_task_t passed as arg to kmpc_omp_task call */
2131 typedef struct kmp_task { /* GEH: Shouldn't this be aligned somehow? */
2132  void *shareds;
2133  kmp_routine_entry_t
2134  routine;
2135  kmp_int32 part_id;
2136 #if OMP_40_ENABLED || OMP_45_ENABLED
2137  kmp_cmplrdata_t
2138  data1; /* Two known optional additions: destructors and priority */
2139  kmp_cmplrdata_t data2; /* Process destructors first, priority second */
2140 /* future data */
2141 #endif
2142  /* private vars */
2143 } kmp_task_t;
2144 
2149 #if OMP_40_ENABLED
2150 typedef struct kmp_taskgroup {
2151  std::atomic<kmp_int32> count; // number of allocated and incomplete tasks
2152  std::atomic<kmp_int32>
2153  cancel_request; // request for cancellation of this taskgroup
2154  struct kmp_taskgroup *parent; // parent taskgroup
2155 #if OMP_50_ENABLED
2156  // Block of data to perform task reduction
2157  void *reduce_data; // reduction related info
2158  kmp_int32 reduce_num_data; // number of data items to reduce
2159 #endif
2160 } kmp_taskgroup_t;
2161 
2162 // forward declarations
2163 typedef union kmp_depnode kmp_depnode_t;
2164 typedef struct kmp_depnode_list kmp_depnode_list_t;
2165 typedef struct kmp_dephash_entry kmp_dephash_entry_t;
2166 
2167 // Compiler sends us this info:
2168 typedef struct kmp_depend_info {
2169  kmp_intptr_t base_addr;
2170  size_t len;
2171  struct {
2172  bool in : 1;
2173  bool out : 1;
2174  bool mtx : 1;
2175  } flags;
2176 } kmp_depend_info_t;
2177 
2178 // Internal structures to work with task dependencies:
2179 struct kmp_depnode_list {
2180  kmp_depnode_t *node;
2181  kmp_depnode_list_t *next;
2182 };
2183 
2184 // Max number of mutexinoutset dependencies per node
2185 #define MAX_MTX_DEPS 4
2186 
2187 typedef struct kmp_base_depnode {
2188  kmp_depnode_list_t *successors; /* used under lock */
2189  kmp_task_t *task; /* non-NULL if depnode is active, used under lock */
2190  kmp_lock_t *mtx_locks[MAX_MTX_DEPS]; /* lock mutexinoutset dependent tasks */
2191  kmp_int32 mtx_num_locks; /* number of locks in mtx_locks array */
2192  kmp_lock_t lock; /* guards shared fields: task, successors */
2193 #if KMP_SUPPORT_GRAPH_OUTPUT
2194  kmp_uint32 id;
2195 #endif
2196  std::atomic<kmp_int32> npredecessors;
2197  std::atomic<kmp_int32> nrefs;
2198 } kmp_base_depnode_t;
2199 
2200 union KMP_ALIGN_CACHE kmp_depnode {
2201  double dn_align; /* use worst case alignment */
2202  char dn_pad[KMP_PAD(kmp_base_depnode_t, CACHE_LINE)];
2203  kmp_base_depnode_t dn;
2204 };
2205 
2206 struct kmp_dephash_entry {
2207  kmp_intptr_t addr;
2208  kmp_depnode_t *last_out;
2209  kmp_depnode_list_t *last_ins;
2210  kmp_depnode_list_t *last_mtxs;
2211  kmp_int32 last_flag;
2212  kmp_lock_t *mtx_lock; /* is referenced by depnodes w/mutexinoutset dep */
2213  kmp_dephash_entry_t *next_in_bucket;
2214 };
2215 
2216 typedef struct kmp_dephash {
2217  kmp_dephash_entry_t **buckets;
2218  size_t size;
2219 #ifdef KMP_DEBUG
2220  kmp_uint32 nelements;
2221  kmp_uint32 nconflicts;
2222 #endif
2223 } kmp_dephash_t;
2224 
2225 #endif
2226 
2227 #ifdef BUILD_TIED_TASK_STACK
2228 
2229 /* Tied Task stack definitions */
2230 typedef struct kmp_stack_block {
2231  kmp_taskdata_t *sb_block[TASK_STACK_BLOCK_SIZE];
2232  struct kmp_stack_block *sb_next;
2233  struct kmp_stack_block *sb_prev;
2234 } kmp_stack_block_t;
2235 
2236 typedef struct kmp_task_stack {
2237  kmp_stack_block_t ts_first_block; // first block of stack entries
2238  kmp_taskdata_t **ts_top; // pointer to the top of stack
2239  kmp_int32 ts_entries; // number of entries on the stack
2240 } kmp_task_stack_t;
2241 
2242 #endif // BUILD_TIED_TASK_STACK
2243 
2244 typedef struct kmp_tasking_flags { /* Total struct must be exactly 32 bits */
2245  /* Compiler flags */ /* Total compiler flags must be 16 bits */
2246  unsigned tiedness : 1; /* task is either tied (1) or untied (0) */
2247  unsigned final : 1; /* task is final(1) so execute immediately */
2248  unsigned merged_if0 : 1; /* no __kmpc_task_{begin/complete}_if0 calls in if0
2249  code path */
2250 #if OMP_40_ENABLED
2251  unsigned destructors_thunk : 1; /* set if the compiler creates a thunk to
2252  invoke destructors from the runtime */
2253 #if OMP_45_ENABLED
2254  unsigned proxy : 1; /* task is a proxy task (it will be executed outside the
2255  context of the RTL) */
2256  unsigned priority_specified : 1; /* set if the compiler provides priority
2257  setting for the task */
2258  unsigned reserved : 10; /* reserved for compiler use */
2259 #else
2260  unsigned reserved : 12; /* reserved for compiler use */
2261 #endif
2262 #else // OMP_40_ENABLED
2263  unsigned reserved : 13; /* reserved for compiler use */
2264 #endif // OMP_40_ENABLED
2265 
2266  /* Library flags */ /* Total library flags must be 16 bits */
2267  unsigned tasktype : 1; /* task is either explicit(1) or implicit (0) */
2268  unsigned task_serial : 1; // task is executed immediately (1) or deferred (0)
2269  unsigned tasking_ser : 1; // all tasks in team are either executed immediately
2270  // (1) or may be deferred (0)
2271  unsigned team_serial : 1; // entire team is serial (1) [1 thread] or parallel
2272  // (0) [>= 2 threads]
2273  /* If either team_serial or tasking_ser is set, task team may be NULL */
2274  /* Task State Flags: */
2275  unsigned started : 1; /* 1==started, 0==not started */
2276  unsigned executing : 1; /* 1==executing, 0==not executing */
2277  unsigned complete : 1; /* 1==complete, 0==not complete */
2278  unsigned freed : 1; /* 1==freed, 0==allocated */
2279  unsigned native : 1; /* 1==gcc-compiled task, 0==intel */
2280  unsigned reserved31 : 7; /* reserved for library use */
2281 
2282 } kmp_tasking_flags_t;
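
// Illustrative sketch (not part of kmp.h): the comment above requires the flag
// word to be exactly 32 bits (16 compiler bits + 16 library bits); an explicit
// build-time check would look like this.
#if 0 // example only
static_assert(sizeof(kmp_tasking_flags_t) == 4,
              "kmp_tasking_flags_t must be exactly 32 bits");
#endif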
2283 
2284 struct kmp_taskdata { /* aligned during dynamic allocation */
2285  kmp_int32 td_task_id; /* id, assigned by debugger */
2286  kmp_tasking_flags_t td_flags; /* task flags */
2287  kmp_team_t *td_team; /* team for this task */
2288  kmp_info_p *td_alloc_thread; /* thread that allocated data structures */
2289  /* Currently not used except for perhaps IDB */
2290  kmp_taskdata_t *td_parent; /* parent task */
2291  kmp_int32 td_level; /* task nesting level */
2292  std::atomic<kmp_int32> td_untied_count; // untied task active parts counter
2293  ident_t *td_ident; /* task identifier */
2294  // Taskwait data.
2295  ident_t *td_taskwait_ident;
2296  kmp_uint32 td_taskwait_counter;
2297  kmp_int32 td_taskwait_thread; /* gtid + 1 of thread encountered taskwait */
2298  KMP_ALIGN_CACHE kmp_internal_control_t
2299  td_icvs; /* Internal control variables for the task */
2300  KMP_ALIGN_CACHE std::atomic<kmp_int32>
2301  td_allocated_child_tasks; /* Child tasks (+ current task) not yet
2302  deallocated */
2303  std::atomic<kmp_int32>
2304  td_incomplete_child_tasks; /* Child tasks not yet complete */
2305 #if OMP_40_ENABLED
2306  kmp_taskgroup_t
2307  *td_taskgroup; // Each task keeps pointer to its current taskgroup
2308  kmp_dephash_t
2309  *td_dephash; // Dependencies for children tasks are tracked from here
2310  kmp_depnode_t
2311  *td_depnode; // Pointer to graph node if this task has dependencies
2312 #endif // OMP_40_ENABLED
2313 #if OMP_45_ENABLED
2314  kmp_task_team_t *td_task_team;
2315  kmp_int32 td_size_alloc; // The size of task structure, including shareds etc.
2316 #if defined(KMP_GOMP_COMPAT)
2317  // 4 or 8 byte integers for the loop bounds in GOMP_taskloop
2318  kmp_int32 td_size_loop_bounds;
2319 #endif
2320 #endif // OMP_45_ENABLED
2321  kmp_taskdata_t *td_last_tied; // keep tied task for task scheduling constraint
2322 #if defined(KMP_GOMP_COMPAT) && OMP_45_ENABLED
2323  // GOMP sends in a copy function for copy constructors
2324  void (*td_copy_func)(void *, void *);
2325 #endif
2326 #if OMPT_SUPPORT
2327  ompt_task_info_t ompt_task_info;
2328 #endif
2329 }; // struct kmp_taskdata
2330 
2331 // Make sure padding above worked
2332 KMP_BUILD_ASSERT(sizeof(kmp_taskdata_t) % sizeof(void *) == 0);
2333 
2334 // Data for task team but per thread
2335 typedef struct kmp_base_thread_data {
2336  kmp_info_p *td_thr; // Pointer back to thread info
2337  // Used only in __kmp_execute_tasks_template, maybe not avail until task is
2338  // queued?
2339  kmp_bootstrap_lock_t td_deque_lock; // Lock for accessing deque
2340  kmp_taskdata_t *
2341  *td_deque; // Deque of tasks encountered by td_thr, dynamically allocated
2342  kmp_int32 td_deque_size; // Size of deque
2343  kmp_uint32 td_deque_head; // Head of deque (will wrap)
2344  kmp_uint32 td_deque_tail; // Tail of deque (will wrap)
2345  kmp_int32 td_deque_ntasks; // Number of tasks in deque
2346  // GEH: shouldn't this be volatile since used in while-spin?
2347  kmp_int32 td_deque_last_stolen; // Thread number of last successful steal
2348 #ifdef BUILD_TIED_TASK_STACK
2349  kmp_task_stack_t td_susp_tied_tasks; // Stack of suspended tied tasks for task
2350 // scheduling constraint
2351 #endif // BUILD_TIED_TASK_STACK
2352 } kmp_base_thread_data_t;
2353 
2354 #define TASK_DEQUE_BITS 8 // Used solely to define INITIAL_TASK_DEQUE_SIZE
2355 #define INITIAL_TASK_DEQUE_SIZE (1 << TASK_DEQUE_BITS)
2356 
2357 #define TASK_DEQUE_SIZE(td) ((td).td_deque_size)
2358 #define TASK_DEQUE_MASK(td) ((td).td_deque_size - 1)
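
// Illustrative sketch (not part of kmp.h): because td_deque_size is kept a
// power of two, head/tail indices wrap with a mask instead of a modulo.
// 'td' and 'some_taskdata' below are hypothetical.
#if 0 // example only
kmp_uint32 slot = td.td_deque_tail & TASK_DEQUE_MASK(td); // wraps inside the deque
td.td_deque[slot] = some_taskdata;                        // enqueue at the tail
td.td_deque_tail = (td.td_deque_tail + 1) & TASK_DEQUE_MASK(td);
#endif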
2359 
2360 typedef union KMP_ALIGN_CACHE kmp_thread_data {
2361  kmp_base_thread_data_t td;
2362  double td_align; /* use worst case alignment */
2363  char td_pad[KMP_PAD(kmp_base_thread_data_t, CACHE_LINE)];
2364 } kmp_thread_data_t;
2365 
2366 // Data for task teams which are used when tasking is enabled for the team
2367 typedef struct kmp_base_task_team {
2368  kmp_bootstrap_lock_t
2369  tt_threads_lock; /* Lock used to allocate per-thread part of task team */
2370  /* must be bootstrap lock since used at library shutdown*/
2371  kmp_task_team_t *tt_next; /* For linking the task team free list */
2372  kmp_thread_data_t
2373  *tt_threads_data; /* Array of per-thread structures for task team */
2374  /* Data survives task team deallocation */
2375  kmp_int32 tt_found_tasks; /* Have we found tasks and queued them while
2376  executing this team? */
2377  /* TRUE means tt_threads_data is set up and initialized */
2378  kmp_int32 tt_nproc; /* #threads in team */
2379  kmp_int32
2380  tt_max_threads; /* number of entries allocated for threads_data array */
2381 #if OMP_45_ENABLED
2382  kmp_int32
2383  tt_found_proxy_tasks; /* Have we found proxy tasks since last barrier */
2384 #endif
2385  kmp_int32 tt_untied_task_encountered;
2386 
2387  KMP_ALIGN_CACHE
2388  std::atomic<kmp_int32> tt_unfinished_threads; /* #threads still active */
2389 
2390  KMP_ALIGN_CACHE
2391  volatile kmp_uint32
2392  tt_active; /* is the team still actively executing tasks */
2393 } kmp_base_task_team_t;
2394 
2395 union KMP_ALIGN_CACHE kmp_task_team {
2396  kmp_base_task_team_t tt;
2397  double tt_align; /* use worst case alignment */
2398  char tt_pad[KMP_PAD(kmp_base_task_team_t, CACHE_LINE)];
2399 };
2400 
2401 #if (USE_FAST_MEMORY == 3) || (USE_FAST_MEMORY == 5)
2402 // Free lists keep same-size free memory slots for fast memory allocation
2403 // routines
2404 typedef struct kmp_free_list {
2405  void *th_free_list_self; // Self-allocated tasks free list
2406  void *th_free_list_sync; // Self-allocated tasks stolen/returned by other
2407  // threads
2408  void *th_free_list_other; // Non-self free list (to be returned to owner's
2409  // sync list)
2410 } kmp_free_list_t;
2411 #endif
2412 #if KMP_NESTED_HOT_TEAMS
2413 // Hot teams array keeps hot teams and their sizes for given thread. Hot teams
2414 // are not put in teams pool, and they don't put threads in threads pool.
2415 typedef struct kmp_hot_team_ptr {
2416  kmp_team_p *hot_team; // pointer to hot_team of given nesting level
2417  kmp_int32 hot_team_nth; // number of threads allocated for the hot_team
2418 } kmp_hot_team_ptr_t;
2419 #endif
2420 #if OMP_40_ENABLED
2421 typedef struct kmp_teams_size {
2422  kmp_int32 nteams; // number of teams in a league
2423  kmp_int32 nth; // number of threads in each team of the league
2424 } kmp_teams_size_t;
2425 #endif
2426 
2427 // OpenMP thread data structures
2428 
2429 typedef struct KMP_ALIGN_CACHE kmp_base_info {
2430  /* Start with the read-only data which is cache aligned and padded. This is
2431  written by the master before the thread starts working. Uber masters may
2432  update themselves later. Usage does not consider serialized regions. */
2433  kmp_desc_t th_info;
2434  kmp_team_p *th_team; /* team we belong to */
2435  kmp_root_p *th_root; /* pointer to root of task hierarchy */
2436  kmp_info_p *th_next_pool; /* next available thread in the pool */
2437  kmp_disp_t *th_dispatch; /* thread's dispatch data */
2438  int th_in_pool; /* in thread pool (32 bits for TCR/TCW) */
2439 
2440  /* The following are cached from the team info structure */
2441  /* TODO use these in more places as determined to be needed via profiling */
2442  int th_team_nproc; /* number of threads in a team */
2443  kmp_info_p *th_team_master; /* the team's master thread */
2444  int th_team_serialized; /* team is serialized */
2445 #if OMP_40_ENABLED
2446  microtask_t th_teams_microtask; /* save entry address for teams construct */
2447  int th_teams_level; /* save initial level of teams construct */
2448 /* it is 0 on device but may be any on host */
2449 #endif
2450 
2451 /* The blocktime info is copied from the team struct to the thread struct */
2452 /* at the start of a barrier, and the values stored in the team are used */
2453 /* at points in the code where the team struct is no longer guaranteed */
2454 /* to exist (from the POV of worker threads). */
2455 #if KMP_USE_MONITOR
2456  int th_team_bt_intervals;
2457  int th_team_bt_set;
2458 #else
2459  kmp_uint64 th_team_bt_intervals;
2460 #endif
2461 
2462 #if KMP_AFFINITY_SUPPORTED
2463  kmp_affin_mask_t *th_affin_mask; /* thread's current affinity mask */
2464 #endif
2465 #if OMP_50_ENABLED
2466  void *const *th_def_allocator; /* per implicit task default allocator */
2467 #endif
2468  /* The data set by the master at reinit, then R/W by the worker */
2469  KMP_ALIGN_CACHE int
2470  th_set_nproc; /* if > 0, then only use this request for the next fork */
2471 #if KMP_NESTED_HOT_TEAMS
2472  kmp_hot_team_ptr_t *th_hot_teams; /* array of hot teams */
2473 #endif
2474 #if OMP_40_ENABLED
2475  kmp_proc_bind_t
2476  th_set_proc_bind; /* if != proc_bind_default, use request for next fork */
2477  kmp_teams_size_t
2478  th_teams_size; /* number of teams/threads in teams construct */
2479 #if KMP_AFFINITY_SUPPORTED
2480  int th_current_place; /* place currently bound to */
2481  int th_new_place; /* place to bind to in par reg */
2482  int th_first_place; /* first place in partition */
2483  int th_last_place; /* last place in partition */
2484 #endif
2485 #endif
2486 #if USE_ITT_BUILD
2487  kmp_uint64 th_bar_arrive_time; /* arrival to barrier timestamp */
2488  kmp_uint64 th_bar_min_time; /* minimum arrival time at the barrier */
2489  kmp_uint64 th_frame_time; /* frame timestamp */
2490 #endif /* USE_ITT_BUILD */
2491  kmp_local_t th_local;
2492  struct private_common *th_pri_head;
2493 
2494  /* Now the data only used by the worker (after initial allocation) */
2495  /* TODO the first serial team should actually be stored in the info_t
2496  structure. This will help reduce initial allocation overhead */
2497  KMP_ALIGN_CACHE kmp_team_p
2498  *th_serial_team; /*serialized team held in reserve*/
2499 
2500 #if OMPT_SUPPORT
2501  ompt_thread_info_t ompt_thread_info;
2502 #endif
2503 
2504  /* The following are also read by the master during reinit */
2505  struct common_table *th_pri_common;
2506 
2507  volatile kmp_uint32 th_spin_here; /* thread-local location for spinning */
2508  /* while awaiting queuing lock acquire */
2509 
2510  volatile void *th_sleep_loc; // this points at a kmp_flag<T>
2511 
2512  ident_t *th_ident;
2513  unsigned th_x; // Random number generator data
2514  unsigned th_a; // Random number generator data
2515 
2516  /* Tasking-related data for the thread */
2517  kmp_task_team_t *th_task_team; // Task team struct
2518  kmp_taskdata_t *th_current_task; // Innermost Task being executed
2519  kmp_uint8 th_task_state; // alternating 0/1 for task team identification
2520  kmp_uint8 *th_task_state_memo_stack; // Stack holding memos of th_task_state
2521  // at nested levels
2522  kmp_uint32 th_task_state_top; // Top element of th_task_state_memo_stack
2523  kmp_uint32 th_task_state_stack_sz; // Size of th_task_state_memo_stack
2524  kmp_uint32 th_reap_state; // Non-zero indicates thread is not
2525  // tasking, thus safe to reap
2526 
2527  /* More stuff for keeping track of active/sleeping threads (this part is
2528  written by the worker thread) */
2529  kmp_uint8 th_active_in_pool; // included in count of #active threads in pool
2530  int th_active; // ! sleeping; 32 bits for TCR/TCW
2531  struct cons_header *th_cons; // used for consistency check
2532 #if KMP_USE_HIER_SCHED
2533  // used for hierarchical scheduling
2534  kmp_hier_private_bdata_t *th_hier_bar_data;
2535 #endif
2536 
2537  /* Add the synchronizing data which is cache aligned and padded. */
2538  KMP_ALIGN_CACHE kmp_balign_t th_bar[bs_last_barrier];
2539 
2540  KMP_ALIGN_CACHE volatile kmp_int32
2541  th_next_waiting; /* gtid+1 of next thread on lock wait queue, 0 if none */
2542 
2543 #if (USE_FAST_MEMORY == 3) || (USE_FAST_MEMORY == 5)
2544 #define NUM_LISTS 4
2545  kmp_free_list_t th_free_lists[NUM_LISTS]; // Free lists for fast memory
2546 // allocation routines
2547 #endif
2548 
2549 #if KMP_OS_WINDOWS
2550  kmp_win32_cond_t th_suspend_cv;
2551  kmp_win32_mutex_t th_suspend_mx;
2552  int th_suspend_init;
2553 #endif
2554 #if KMP_OS_UNIX
2555  kmp_cond_align_t th_suspend_cv;
2556  kmp_mutex_align_t th_suspend_mx;
2557  int th_suspend_init_count;
2558 #endif
2559 
2560 #if USE_ITT_BUILD
2561  kmp_itt_mark_t th_itt_mark_single;
2562 // alignment ???
2563 #endif /* USE_ITT_BUILD */
2564 #if KMP_STATS_ENABLED
2565  kmp_stats_list *th_stats;
2566 #endif
2567 #if KMP_OS_UNIX
2568  std::atomic<bool> th_blocking;
2569 #endif
2570 } kmp_base_info_t;
2571 
2572 typedef union KMP_ALIGN_CACHE kmp_info {
2573  double th_align; /* use worst case alignment */
2574  char th_pad[KMP_PAD(kmp_base_info_t, CACHE_LINE)];
2575  kmp_base_info_t th;
2576 } kmp_info_t;
2577 
2578 // OpenMP thread team data structures
2579 
2580 typedef struct kmp_base_data { volatile kmp_uint32 t_value; } kmp_base_data_t;
2581 
2582 typedef union KMP_ALIGN_CACHE kmp_sleep_team {
2583  double dt_align; /* use worst case alignment */
2584  char dt_pad[KMP_PAD(kmp_base_data_t, CACHE_LINE)];
2585  kmp_base_data_t dt;
2586 } kmp_sleep_team_t;
2587 
2588 typedef union KMP_ALIGN_CACHE kmp_ordered_team {
2589  double dt_align; /* use worst case alignment */
2590  char dt_pad[KMP_PAD(kmp_base_data_t, CACHE_LINE)];
2591  kmp_base_data_t dt;
2592 } kmp_ordered_team_t;
2593 
2594 typedef int (*launch_t)(int gtid);
2595 
2596 /* Minimum number of ARGV entries to malloc if necessary */
2597 #define KMP_MIN_MALLOC_ARGV_ENTRIES 100
2598 
2599 // Set up how many argv pointers will fit in cache lines containing
2600 // t_inline_argv. Historically, we have supported at least 96 bytes. Using a
2601 // larger value to put more space between the master-write/worker-read section
2602 // and the read/write-by-all section seems to buy more performance on EPCC PARALLEL.
2603 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
2604 #define KMP_INLINE_ARGV_BYTES \
2605  (4 * CACHE_LINE - \
2606  ((3 * KMP_PTR_SKIP + 2 * sizeof(int) + 2 * sizeof(kmp_int8) + \
2607  sizeof(kmp_int16) + sizeof(kmp_uint32)) % \
2608  CACHE_LINE))
2609 #else
2610 #define KMP_INLINE_ARGV_BYTES \
2611  (2 * CACHE_LINE - ((3 * KMP_PTR_SKIP + 2 * sizeof(int)) % CACHE_LINE))
2612 #endif
2613 #define KMP_INLINE_ARGV_ENTRIES (int)(KMP_INLINE_ARGV_BYTES / KMP_PTR_SKIP)
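
// Worked example (not part of kmp.h), assuming CACHE_LINE == 64 and 8-byte
// pointers on x86_64: the fixed fields ahead of t_inline_argv occupy
// 3*8 + 2*4 + 2*1 + 2 + 4 = 40 bytes, so KMP_INLINE_ARGV_BYTES is
// 4*64 - (40 % 64) = 216 and KMP_INLINE_ARGV_ENTRIES is 216 / 8 = 27 slots.
#if 0 // example check only, valid under the assumptions above
static_assert(KMP_INLINE_ARGV_ENTRIES == 27,
              "expected inline argv capacity for the x86_64 layout");
#endif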
2614 
2615 typedef struct KMP_ALIGN_CACHE kmp_base_team {
2616  // Synchronization Data
2617  // ---------------------------------------------------------------------------
2618  KMP_ALIGN_CACHE kmp_ordered_team_t t_ordered;
2619  kmp_balign_team_t t_bar[bs_last_barrier];
2620  std::atomic<int> t_construct; // count of single directive encountered by team
2621  char pad[sizeof(kmp_lock_t)]; // padding to maintain performance on big iron
2622 
2623  // Master only
2624  // ---------------------------------------------------------------------------
2625  KMP_ALIGN_CACHE int t_master_tid; // tid of master in parent team
2626  int t_master_this_cons; // "this_construct" single counter of master in parent
2627  // team
2628  ident_t *t_ident; // if volatile, have to change too much other crud to
2629  // volatile too
2630  kmp_team_p *t_parent; // parent team
2631  kmp_team_p *t_next_pool; // next free team in the team pool
2632  kmp_disp_t *t_dispatch; // thread's dispatch data
2633  kmp_task_team_t *t_task_team[2]; // Task team struct; switch between 2
2634 #if OMP_40_ENABLED
2635  kmp_proc_bind_t t_proc_bind; // bind type for par region
2636 #endif // OMP_40_ENABLED
2637 #if USE_ITT_BUILD
2638  kmp_uint64 t_region_time; // region begin timestamp
2639 #endif /* USE_ITT_BUILD */
2640 
2641  // Master write, workers read
2642  // --------------------------------------------------------------------------
2643  KMP_ALIGN_CACHE void **t_argv;
2644  int t_argc;
2645  int t_nproc; // number of threads in team
2646  microtask_t t_pkfn;
2647  launch_t t_invoke; // procedure to launch the microtask
2648 
2649 #if OMPT_SUPPORT
2650  ompt_team_info_t ompt_team_info;
2651  ompt_lw_taskteam_t *ompt_serialized_team_info;
2652 #endif
2653 
2654 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
2655  kmp_int8 t_fp_control_saved;
2656  kmp_int8 t_pad2b;
2657  kmp_int16 t_x87_fpu_control_word; // FP control regs
2658  kmp_uint32 t_mxcsr;
2659 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
2660 
2661  void *t_inline_argv[KMP_INLINE_ARGV_ENTRIES];
2662 
2663  KMP_ALIGN_CACHE kmp_info_t **t_threads;
2664  kmp_taskdata_t
2665  *t_implicit_task_taskdata; // Taskdata for the thread's implicit task
2666  int t_level; // nested parallel level
2667 
2668  KMP_ALIGN_CACHE int t_max_argc;
2669  int t_max_nproc; // max threads this team can handle (dynamically expandable)
2670  int t_serialized; // levels deep of serialized teams
2671  dispatch_shared_info_t *t_disp_buffer; // buffers for dispatch system
2672  int t_id; // team's id, assigned by debugger.
2673  int t_active_level; // nested active parallel level
2674  kmp_r_sched_t t_sched; // run-time schedule for the team
2675 #if OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED
2676  int t_first_place; // first & last place in parent thread's partition.
2677  int t_last_place; // Restore these values to master after par region.
2678 #endif // OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED
2679  int t_size_changed; // team size was changed?: 0: no, 1: yes, -1: changed via
2680 // omp_set_num_threads() call
2681 #if OMP_50_ENABLED
2682  void *const *t_def_allocator; /* per implicit task default allocator */
2683 #endif
2684 
2685 // Read/write by workers as well
2686 #if (KMP_ARCH_X86 || KMP_ARCH_X86_64)
2687  // Using CACHE_LINE=64 reduces memory footprint, but causes a big perf
2688  // regression of epcc 'parallel' and 'barrier' on fxe256lin01. This extra
2689  // padding serves to fix the performance of epcc 'parallel' and 'barrier' when
2690  // CACHE_LINE=64. TODO: investigate more and get rid of this padding.
2691  char dummy_padding[1024];
2692 #endif
2693  // Internal control stack for additional nested teams.
2694  KMP_ALIGN_CACHE kmp_internal_control_t *t_control_stack_top;
2695 // for SERIALIZED teams nested 2 or more levels deep
2696 #if OMP_40_ENABLED
2697  // typed flag to store request state of cancellation
2698  std::atomic<kmp_int32> t_cancel_request;
2699 #endif
2700  int t_master_active; // save on fork, restore on join
2701  kmp_taskq_t t_taskq; // this team's task queue
2702  void *t_copypriv_data; // team specific pointer to copyprivate data array
2703 #if KMP_OS_WINDOWS
2704  std::atomic<kmp_uint32> t_copyin_counter;
2705 #endif
2706 #if USE_ITT_BUILD
2707  void *t_stack_id; // team specific stack stitching id (for ittnotify)
2708 #endif /* USE_ITT_BUILD */
2709 } kmp_base_team_t;
2710 
2711 union KMP_ALIGN_CACHE kmp_team {
2712  kmp_base_team_t t;
2713  double t_align; /* use worst case alignment */
2714  char t_pad[KMP_PAD(kmp_base_team_t, CACHE_LINE)];
2715 };
2716 
2717 typedef union KMP_ALIGN_CACHE kmp_time_global {
2718  double dt_align; /* use worst case alignment */
2719  char dt_pad[KMP_PAD(kmp_base_data_t, CACHE_LINE)];
2720  kmp_base_data_t dt;
2721 } kmp_time_global_t;
2722 
2723 typedef struct kmp_base_global {
2724  /* cache-aligned */
2725  kmp_time_global_t g_time;
2726 
2727  /* non cache-aligned */
2728  volatile int g_abort;
2729  volatile int g_done;
2730 
2731  int g_dynamic;
2732  enum dynamic_mode g_dynamic_mode;
2733 } kmp_base_global_t;
2734 
2735 typedef union KMP_ALIGN_CACHE kmp_global {
2736  kmp_base_global_t g;
2737  double g_align; /* use worst case alignment */
2738  char g_pad[KMP_PAD(kmp_base_global_t, CACHE_LINE)];
2739 } kmp_global_t;
2740 
2741 typedef struct kmp_base_root {
2742  // TODO: GEH - combine r_active with r_in_parallel then r_active ==
2743  // (r_in_parallel>= 0)
2744  // TODO: GEH - then replace r_active with t_active_levels if we can to reduce
2745  // the synch overhead or keeping r_active
2746  volatile int r_active; /* TRUE if some region in a nest has > 1 thread */
2747  // GEH: This is misnamed, should be r_in_parallel
2748  volatile int r_nested; // TODO: GEH - This is unused, just remove it entirely.
2749  // keeps a count of active parallel regions per root
2750  std::atomic<int> r_in_parallel;
2751  // GEH: This is misnamed, should be r_active_levels
2752  kmp_team_t *r_root_team;
2753  kmp_team_t *r_hot_team;
2754  kmp_info_t *r_uber_thread;
2755  kmp_lock_t r_begin_lock;
2756  volatile int r_begin;
2757  int r_blocktime; /* blocktime for this root and descendants */
2758  int r_cg_nthreads; // count of active threads in a contention group
2759 } kmp_base_root_t;
2760 
2761 typedef union KMP_ALIGN_CACHE kmp_root {
2762  kmp_base_root_t r;
2763  double r_align; /* use worst case alignment */
2764  char r_pad[KMP_PAD(kmp_base_root_t, CACHE_LINE)];
2765 } kmp_root_t;
2766 
2767 struct fortran_inx_info {
2768  kmp_int32 data;
2769 };
2770 
2771 /* ------------------------------------------------------------------------ */
2772 
2773 extern int __kmp_settings;
2774 extern int __kmp_duplicate_library_ok;
2775 #if USE_ITT_BUILD
2776 extern int __kmp_forkjoin_frames;
2777 extern int __kmp_forkjoin_frames_mode;
2778 #endif
2779 extern PACKED_REDUCTION_METHOD_T __kmp_force_reduction_method;
2780 extern int __kmp_determ_red;
2781 
2782 #ifdef KMP_DEBUG
2783 extern int kmp_a_debug;
2784 extern int kmp_b_debug;
2785 extern int kmp_c_debug;
2786 extern int kmp_d_debug;
2787 extern int kmp_e_debug;
2788 extern int kmp_f_debug;
2789 #endif /* KMP_DEBUG */
2790 
2791 /* For debug information logging using rotating buffer */
2792 #define KMP_DEBUG_BUF_LINES_INIT 512
2793 #define KMP_DEBUG_BUF_LINES_MIN 1
2794 
2795 #define KMP_DEBUG_BUF_CHARS_INIT 128
2796 #define KMP_DEBUG_BUF_CHARS_MIN 2
2797 
2798 extern int
2799  __kmp_debug_buf; /* TRUE means use buffer, FALSE means print to stderr */
2800 extern int __kmp_debug_buf_lines; /* How many lines of debug stored in buffer */
2801 extern int
2802  __kmp_debug_buf_chars; /* How many characters allowed per line in buffer */
2803 extern int __kmp_debug_buf_atomic; /* TRUE means use atomic update of buffer
2804  entry pointer */
2805 
2806 extern char *__kmp_debug_buffer; /* Debug buffer itself */
2807 extern std::atomic<int> __kmp_debug_count; /* Counter for number of lines
2808  printed in buffer so far */
2809 extern int __kmp_debug_buf_warn_chars; /* Keep track of char increase
2810  recommended in warnings */
2811 /* end rotating debug buffer */
2812 
2813 #ifdef KMP_DEBUG
2814 extern int __kmp_par_range; /* +1 => only go par for constructs in range */
2815 
2816 #define KMP_PAR_RANGE_ROUTINE_LEN 1024
2817 extern char __kmp_par_range_routine[KMP_PAR_RANGE_ROUTINE_LEN];
2818 #define KMP_PAR_RANGE_FILENAME_LEN 1024
2819 extern char __kmp_par_range_filename[KMP_PAR_RANGE_FILENAME_LEN];
2820 extern int __kmp_par_range_lb;
2821 extern int __kmp_par_range_ub;
2822 #endif
2823 
2824 /* For printing out dynamic storage map for threads and teams */
2825 extern int
2826  __kmp_storage_map; /* True means print storage map for threads and teams */
2827 extern int __kmp_storage_map_verbose; /* True means storage map includes
2828  placement info */
2829 extern int __kmp_storage_map_verbose_specified;
2830 
2831 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
2832 extern kmp_cpuinfo_t __kmp_cpuinfo;
2833 #endif
2834 
2835 extern volatile int __kmp_init_serial;
2836 extern volatile int __kmp_init_gtid;
2837 extern volatile int __kmp_init_common;
2838 extern volatile int __kmp_init_middle;
2839 extern volatile int __kmp_init_parallel;
2840 #if KMP_USE_MONITOR
2841 extern volatile int __kmp_init_monitor;
2842 #endif
2843 extern volatile int __kmp_init_user_locks;
2844 extern int __kmp_init_counter;
2845 extern int __kmp_root_counter;
2846 extern int __kmp_version;
2847 
2848 /* list of address of allocated caches for commons */
2849 extern kmp_cached_addr_t *__kmp_threadpriv_cache_list;
2850 
2851 /* Barrier algorithm types and options */
2852 extern kmp_uint32 __kmp_barrier_gather_bb_dflt;
2853 extern kmp_uint32 __kmp_barrier_release_bb_dflt;
2854 extern kmp_bar_pat_e __kmp_barrier_gather_pat_dflt;
2855 extern kmp_bar_pat_e __kmp_barrier_release_pat_dflt;
2856 extern kmp_uint32 __kmp_barrier_gather_branch_bits[bs_last_barrier];
2857 extern kmp_uint32 __kmp_barrier_release_branch_bits[bs_last_barrier];
2858 extern kmp_bar_pat_e __kmp_barrier_gather_pattern[bs_last_barrier];
2859 extern kmp_bar_pat_e __kmp_barrier_release_pattern[bs_last_barrier];
2860 extern char const *__kmp_barrier_branch_bit_env_name[bs_last_barrier];
2861 extern char const *__kmp_barrier_pattern_env_name[bs_last_barrier];
2862 extern char const *__kmp_barrier_type_name[bs_last_barrier];
2863 extern char const *__kmp_barrier_pattern_name[bp_last_bar];
2864 
2865 /* Global Locks */
2866 extern kmp_bootstrap_lock_t __kmp_initz_lock; /* control initialization */
2867 extern kmp_bootstrap_lock_t __kmp_forkjoin_lock; /* control fork/join access */
2868 extern kmp_bootstrap_lock_t __kmp_task_team_lock;
2869 extern kmp_bootstrap_lock_t
2870  __kmp_exit_lock; /* exit() is not always thread-safe */
2871 #if KMP_USE_MONITOR
2872 extern kmp_bootstrap_lock_t
2873  __kmp_monitor_lock; /* control monitor thread creation */
2874 #endif
2875 extern kmp_bootstrap_lock_t
2876  __kmp_tp_cached_lock; /* used for the hack to allow threadprivate cache and
2877  __kmp_threads expansion to co-exist */
2878 
2879 extern kmp_lock_t __kmp_global_lock; /* control OS/global access */
2880 extern kmp_queuing_lock_t __kmp_dispatch_lock; /* control dispatch access */
2881 extern kmp_lock_t __kmp_debug_lock; /* control I/O access for KMP_DEBUG */
2882 
2883 /* used for yielding spin-waits */
2884 extern unsigned int __kmp_init_wait; /* initial number of spin-tests */
2885 extern unsigned int __kmp_next_wait; /* subsequent number of spin-tests */
2886 
2887 extern enum library_type __kmp_library;
2888 
2889 extern enum sched_type __kmp_sched; /* default runtime scheduling */
2890 extern enum sched_type __kmp_static; /* default static scheduling method */
2891 extern enum sched_type __kmp_guided; /* default guided scheduling method */
2892 extern enum sched_type __kmp_auto; /* default auto scheduling method */
2893 extern int __kmp_chunk; /* default runtime chunk size */
2894 
2895 extern size_t __kmp_stksize; /* stack size per thread */
2896 #if KMP_USE_MONITOR
2897 extern size_t __kmp_monitor_stksize; /* stack size for monitor thread */
2898 #endif
2899 extern size_t __kmp_stkoffset; /* stack offset per thread */
2900 extern int __kmp_stkpadding; /* Should we pad root thread(s) stack */
2901 
2902 extern size_t
2903  __kmp_malloc_pool_incr; /* incremental size of pool for kmp_malloc() */
2904 extern int __kmp_env_stksize; /* was KMP_STACKSIZE specified? */
2905 extern int __kmp_env_blocktime; /* was KMP_BLOCKTIME specified? */
2906 extern int __kmp_env_checks; /* was KMP_CHECKS specified? */
2907 extern int __kmp_env_consistency_check; // was KMP_CONSISTENCY_CHECK specified?
2908 extern int __kmp_generate_warnings; /* should we issue warnings? */
2909 extern int __kmp_reserve_warn; /* have we issued reserve_threads warning? */
2910 
2911 #ifdef DEBUG_SUSPEND
2912 extern int __kmp_suspend_count; /* count inside __kmp_suspend_template() */
2913 #endif
2914 
2915 extern kmp_uint32 __kmp_yield_init;
2916 extern kmp_uint32 __kmp_yield_next;
2917 
2918 #if KMP_USE_MONITOR
2919 extern kmp_uint32 __kmp_yielding_on;
2920 #endif
2921 extern kmp_uint32 __kmp_yield_cycle;
2922 extern kmp_int32 __kmp_yield_on_count;
2923 extern kmp_int32 __kmp_yield_off_count;
2924 
2925 /* ------------------------------------------------------------------------- */
2926 extern int __kmp_allThreadsSpecified;
2927 
2928 extern size_t __kmp_align_alloc;
2929 /* following data protected by initialization routines */
2930 extern int __kmp_xproc; /* number of processors in the system */
2931 extern int __kmp_avail_proc; /* number of processors available to the process */
2932 extern size_t __kmp_sys_min_stksize; /* system-defined minimum stack size */
2933 extern int __kmp_sys_max_nth; /* system-imposed maximum number of threads */
2934 // maximum total number of concurrently-existing threads on device
2935 extern int __kmp_max_nth;
2936 // maximum total number of concurrently-existing threads in a contention group
2937 extern int __kmp_cg_max_nth;
2938 extern int __kmp_teams_max_nth; // max threads used in a teams construct
2939 extern int __kmp_threads_capacity; /* capacity of the arrays __kmp_threads and
2940  __kmp_root */
2941 extern int __kmp_dflt_team_nth; /* default number of threads in a parallel
2942  region a la OMP_NUM_THREADS */
2943 extern int __kmp_dflt_team_nth_ub; /* upper bound on "" determined at serial
2944  initialization */
2945 extern int __kmp_tp_capacity; /* capacity of __kmp_threads if threadprivate is
2946  used (fixed) */
2947 extern int __kmp_tp_cached; /* whether threadprivate cache has been created
2948  (__kmpc_threadprivate_cached()) */
2949 extern int __kmp_dflt_nested; /* nested parallelism enabled by default a la
2950  OMP_NESTED */
2951 extern int __kmp_dflt_blocktime; /* number of milliseconds to wait before
2952  blocking (env setting) */
2953 #if KMP_USE_MONITOR
2954 extern int
2955  __kmp_monitor_wakeups; /* number of times monitor wakes up per second */
2956 extern int __kmp_bt_intervals; /* number of monitor timestamp intervals before
2957  blocking */
2958 #endif
2959 #ifdef KMP_ADJUST_BLOCKTIME
2960 extern int __kmp_zero_bt; /* whether blocktime has been forced to zero */
2961 #endif /* KMP_ADJUST_BLOCKTIME */
2962 #ifdef KMP_DFLT_NTH_CORES
2963 extern int __kmp_ncores; /* Total number of cores for threads placement */
2964 #endif
2965 /* Number of millisecs to delay on abort for Intel(R) VTune(TM) tools */
2966 extern int __kmp_abort_delay;
2967 
2968 extern int __kmp_need_register_atfork_specified;
2969 extern int
2970  __kmp_need_register_atfork; /* At initialization, call pthread_atfork to
2971  install fork handler */
2972 extern int __kmp_gtid_mode; /* Method of getting gtid, values:
2973  0 - not set, will be set at runtime
2974  1 - using stack search
2975  2 - dynamic TLS (pthread_getspecific(Linux* OS/OS
2976  X*) or TlsGetValue(Windows* OS))
2977  3 - static TLS (__declspec(thread) __kmp_gtid),
2978  Linux* OS .so only. */
2979 extern int
2980  __kmp_adjust_gtid_mode; /* If true, adjust method based on #threads */
2981 #ifdef KMP_TDATA_GTID
2982 extern KMP_THREAD_LOCAL int __kmp_gtid;
2983 #endif
2984 extern int __kmp_tls_gtid_min; /* #threads below which use sp search for gtid */
2985 extern int __kmp_foreign_tp; // If true, separate TP var for each foreign thread
2986 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
2987 extern int __kmp_inherit_fp_control; // copy fp creg(s) parent->workers at fork
2988 extern kmp_int16 __kmp_init_x87_fpu_control_word; // init thread's FP ctrl reg
2989 extern kmp_uint32 __kmp_init_mxcsr; /* init thread's mxcsr */
2990 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
2991 
2992 extern int __kmp_dflt_max_active_levels; /* max_active_levels for nested
2993  parallelism enabled by default via
2994  OMP_MAX_ACTIVE_LEVELS */
2995 extern int __kmp_dispatch_num_buffers; /* max possible dynamic loops in
2996  concurrent execution per team */
2997 #if KMP_NESTED_HOT_TEAMS
2998 extern int __kmp_hot_teams_mode;
2999 extern int __kmp_hot_teams_max_level;
3000 #endif
3001 
3002 #if KMP_OS_LINUX
3003 extern enum clock_function_type __kmp_clock_function;
3004 extern int __kmp_clock_function_param;
3005 #endif /* KMP_OS_LINUX */
3006 
3007 #if KMP_MIC_SUPPORTED
3008 extern enum mic_type __kmp_mic_type;
3009 #endif
3010 
3011 #ifdef USE_LOAD_BALANCE
3012 extern double __kmp_load_balance_interval; // load balance algorithm interval
3013 #endif /* USE_LOAD_BALANCE */
3014 
3015 // OpenMP 3.1 - Nested num threads array
3016 typedef struct kmp_nested_nthreads_t {
3017  int *nth;
3018  int size;
3019  int used;
3020 } kmp_nested_nthreads_t;
3021 
3022 extern kmp_nested_nthreads_t __kmp_nested_nth;
3023 
3024 #if KMP_USE_ADAPTIVE_LOCKS
3025 
3026 // Parameters for the speculative lock backoff system.
3027 struct kmp_adaptive_backoff_params_t {
3028  // Number of soft retries before it counts as a hard retry.
3029  kmp_uint32 max_soft_retries;
3030  // Badness is a bit mask : 0,1,3,7,15,... on each hard failure we move one to
3031  // the right
3032  kmp_uint32 max_badness;
3033 };
3034 
3035 extern kmp_adaptive_backoff_params_t __kmp_adaptive_backoff_params;
3036 
3037 #if KMP_DEBUG_ADAPTIVE_LOCKS
3038 extern const char *__kmp_speculative_statsfile;
3039 #endif
3040 
3041 #endif // KMP_USE_ADAPTIVE_LOCKS
3042 
3043 #if OMP_40_ENABLED
3044 extern int __kmp_display_env; /* TRUE or FALSE */
3045 extern int __kmp_display_env_verbose; /* TRUE if OMP_DISPLAY_ENV=VERBOSE */
3046 extern int __kmp_omp_cancellation; /* TRUE or FALSE */
3047 #endif
3048 
3049 /* ------------------------------------------------------------------------- */
3050 
3051 /* the following are protected by the fork/join lock */
3052 /* write: lock read: anytime */
3053 extern kmp_info_t **__kmp_threads; /* Descriptors for the threads */
3054 /* read/write: lock */
3055 extern volatile kmp_team_t *__kmp_team_pool;
3056 extern volatile kmp_info_t *__kmp_thread_pool;
3057 extern kmp_info_t *__kmp_thread_pool_insert_pt;
3058 
3059 // total num threads reachable from some root thread including all root threads
3060 extern volatile int __kmp_nth;
3061 /* total number of threads reachable from some root thread including all root
3062  threads, and those in the thread pool */
3063 extern volatile int __kmp_all_nth;
3064 extern int __kmp_thread_pool_nth;
3065 extern std::atomic<int> __kmp_thread_pool_active_nth;
3066 
3067 extern kmp_root_t **__kmp_root; /* root of thread hierarchy */
3068 /* end data protected by fork/join lock */
3069 /* ------------------------------------------------------------------------- */
3070 
3071 #define __kmp_get_gtid() __kmp_get_global_thread_id()
3072 #define __kmp_entry_gtid() __kmp_get_global_thread_id_reg()
3073 #define __kmp_get_tid() (__kmp_tid_from_gtid(__kmp_get_gtid()))
3074 #define __kmp_get_team() (__kmp_threads[(__kmp_get_gtid())]->th.th_team)
3075 #define __kmp_get_thread() (__kmp_thread_from_gtid(__kmp_get_gtid()))
3076 
3077 // AT: Which way is correct?
3078 // AT: 1. nproc = __kmp_threads[ ( gtid ) ] -> th.th_team -> t.t_nproc;
3079 // AT: 2. nproc = __kmp_threads[ ( gtid ) ] -> th.th_team_nproc;
3080 #define __kmp_get_team_num_threads(gtid) \
3081  (__kmp_threads[(gtid)]->th.th_team->t.t_nproc)
3082 
3083 static inline bool KMP_UBER_GTID(int gtid) {
3084  KMP_DEBUG_ASSERT(gtid >= KMP_GTID_MIN);
3085  KMP_DEBUG_ASSERT(gtid < __kmp_threads_capacity);
3086  return (gtid >= 0 && __kmp_root[gtid] && __kmp_threads[gtid] &&
3087  __kmp_threads[gtid] == __kmp_root[gtid]->r.r_uber_thread);
3088 }
3089 
3090 static inline int __kmp_tid_from_gtid(int gtid) {
3091  KMP_DEBUG_ASSERT(gtid >= 0);
3092  return __kmp_threads[gtid]->th.th_info.ds.ds_tid;
3093 }
3094 
3095 static inline int __kmp_gtid_from_tid(int tid, const kmp_team_t *team) {
3096  KMP_DEBUG_ASSERT(tid >= 0 && team);
3097  return team->t.t_threads[tid]->th.th_info.ds.ds_gtid;
3098 }
3099 
3100 static inline int __kmp_gtid_from_thread(const kmp_info_t *thr) {
3101  KMP_DEBUG_ASSERT(thr);
3102  return thr->th.th_info.ds.ds_gtid;
3103 }
3104 
3105 static inline kmp_info_t *__kmp_thread_from_gtid(int gtid) {
3106  KMP_DEBUG_ASSERT(gtid >= 0);
3107  return __kmp_threads[gtid];
3108 }
3109 
3110 static inline kmp_team_t *__kmp_team_from_gtid(int gtid) {
3111  KMP_DEBUG_ASSERT(gtid >= 0);
3112  return __kmp_threads[gtid]->th.th_team;
3113 }
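// Editor's note -- usage sketch only: the macros and inline helpers above all
// key off the global thread id (gtid).  Within already-initialized runtime
// code the structures are typically navigated like this:
//
//   int gtid = __kmp_get_gtid();                    // global id of this thread
//   kmp_info_t *thr  = __kmp_thread_from_gtid(gtid);
//   kmp_team_t *team = __kmp_team_from_gtid(gtid);  // same as thr->th.th_team
//   int tid   = __kmp_tid_from_gtid(gtid);          // id within the team
//   int nproc = __kmp_get_team_num_threads(gtid);   // current team size
//
// __kmp_entry_gtid() differs from __kmp_get_gtid() in that it registers the
// calling thread with the runtime if it is not yet known.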
3114 
3115 /* ------------------------------------------------------------------------- */
3116 
3117 extern kmp_global_t __kmp_global; /* global status */
3118 
3119 extern kmp_info_t __kmp_monitor;
3120 // For Debugging Support Library
3121 extern std::atomic<kmp_uint32> __kmp_team_counter;
3122 // For Debugging Support Library
3123 extern std::atomic<kmp_uint32> __kmp_task_counter;
3124 
3125 #if USE_DEBUGGER
3126 #define _KMP_GEN_ID(counter) \
3127  (__kmp_debugging ? KMP_ATOMIC_INC(&counter) + 1 : ~0)
3128 #else
3129 #define _KMP_GEN_ID(counter) (~0)
3130 #endif /* USE_DEBUGGER */
3131 
3132 #define KMP_GEN_TASK_ID() _KMP_GEN_ID(__kmp_task_counter)
3133 #define KMP_GEN_TEAM_ID() _KMP_GEN_ID(__kmp_team_counter)
3134 
3135 /* ------------------------------------------------------------------------ */
3136 
3137 extern void __kmp_print_storage_map_gtid(int gtid, void *p1, void *p2,
3138  size_t size, char const *format, ...);
3139 
3140 extern void __kmp_serial_initialize(void);
3141 extern void __kmp_middle_initialize(void);
3142 extern void __kmp_parallel_initialize(void);
3143 
3144 extern void __kmp_internal_begin(void);
3145 extern void __kmp_internal_end_library(int gtid);
3146 extern void __kmp_internal_end_thread(int gtid);
3147 extern void __kmp_internal_end_atexit(void);
3148 extern void __kmp_internal_end_fini(void);
3149 extern void __kmp_internal_end_dtor(void);
3150 extern void __kmp_internal_end_dest(void *);
3151 
3152 extern int __kmp_register_root(int initial_thread);
3153 extern void __kmp_unregister_root(int gtid);
3154 
3155 extern int __kmp_ignore_mppbeg(void);
3156 extern int __kmp_ignore_mppend(void);
3157 
3158 extern int __kmp_enter_single(int gtid, ident_t *id_ref, int push_ws);
3159 extern void __kmp_exit_single(int gtid);
3160 
3161 extern void __kmp_parallel_deo(int *gtid_ref, int *cid_ref, ident_t *loc_ref);
3162 extern void __kmp_parallel_dxo(int *gtid_ref, int *cid_ref, ident_t *loc_ref);
3163 
3164 #ifdef USE_LOAD_BALANCE
3165 extern int __kmp_get_load_balance(int);
3166 #endif
3167 
3168 extern int __kmp_get_global_thread_id(void);
3169 extern int __kmp_get_global_thread_id_reg(void);
3170 extern void __kmp_exit_thread(int exit_status);
3171 extern void __kmp_abort(char const *format, ...);
3172 extern void __kmp_abort_thread(void);
3173 KMP_NORETURN extern void __kmp_abort_process(void);
3174 extern void __kmp_warn(char const *format, ...);
3175 
3176 extern void __kmp_set_num_threads(int new_nth, int gtid);
3177 
3178 // Returns current thread (pointer to kmp_info_t). Current thread *must* be
3179 // registered.
3180 static inline kmp_info_t *__kmp_entry_thread() {
3181  int gtid = __kmp_entry_gtid();
3182 
3183  return __kmp_threads[gtid];
3184 }
3185 
3186 extern void __kmp_set_max_active_levels(int gtid, int new_max_active_levels);
3187 extern int __kmp_get_max_active_levels(int gtid);
3188 extern int __kmp_get_ancestor_thread_num(int gtid, int level);
3189 extern int __kmp_get_team_size(int gtid, int level);
3190 extern void __kmp_set_schedule(int gtid, kmp_sched_t new_sched, int chunk);
3191 extern void __kmp_get_schedule(int gtid, kmp_sched_t *sched, int *chunk);
3192 
3193 extern unsigned short __kmp_get_random(kmp_info_t *thread);
3194 extern void __kmp_init_random(kmp_info_t *thread);
3195 
3196 extern kmp_r_sched_t __kmp_get_schedule_global(void);
3197 extern void __kmp_adjust_num_threads(int new_nproc);
3198 
3199 extern void *___kmp_allocate(size_t size KMP_SRC_LOC_DECL);
3200 extern void *___kmp_page_allocate(size_t size KMP_SRC_LOC_DECL);
3201 extern void ___kmp_free(void *ptr KMP_SRC_LOC_DECL);
3202 #define __kmp_allocate(size) ___kmp_allocate((size)KMP_SRC_LOC_CURR)
3203 #define __kmp_page_allocate(size) ___kmp_page_allocate((size)KMP_SRC_LOC_CURR)
3204 #define __kmp_free(ptr) ___kmp_free((ptr)KMP_SRC_LOC_CURR)
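// Editor's note -- hedged sketch: the ___kmp_* allocators take an extra
// KMP_SRC_LOC_DECL parameter pack so debug builds can record the call site;
// KMP_SRC_LOC_CURR supplies it and expands to nothing (including the comma)
// when the tracking is compiled out, which is why the macro bodies above have
// no comma before it.  Callers always go through the macros:
//
//   kmp_team_t *t = (kmp_team_t *)__kmp_allocate(sizeof(kmp_team_t));
//   /* ... */
//   __kmp_free(t);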
3205 
3206 #if USE_FAST_MEMORY
3207 extern void *___kmp_fast_allocate(kmp_info_t *this_thr,
3208  size_t size KMP_SRC_LOC_DECL);
3209 extern void ___kmp_fast_free(kmp_info_t *this_thr, void *ptr KMP_SRC_LOC_DECL);
3210 extern void __kmp_free_fast_memory(kmp_info_t *this_thr);
3211 extern void __kmp_initialize_fast_memory(kmp_info_t *this_thr);
3212 #define __kmp_fast_allocate(this_thr, size) \
3213  ___kmp_fast_allocate((this_thr), (size)KMP_SRC_LOC_CURR)
3214 #define __kmp_fast_free(this_thr, ptr) \
3215  ___kmp_fast_free((this_thr), (ptr)KMP_SRC_LOC_CURR)
3216 #endif
3217 
3218 extern void *___kmp_thread_malloc(kmp_info_t *th, size_t size KMP_SRC_LOC_DECL);
3219 extern void *___kmp_thread_calloc(kmp_info_t *th, size_t nelem,
3220  size_t elsize KMP_SRC_LOC_DECL);
3221 extern void *___kmp_thread_realloc(kmp_info_t *th, void *ptr,
3222  size_t size KMP_SRC_LOC_DECL);
3223 extern void ___kmp_thread_free(kmp_info_t *th, void *ptr KMP_SRC_LOC_DECL);
3224 #define __kmp_thread_malloc(th, size) \
3225  ___kmp_thread_malloc((th), (size)KMP_SRC_LOC_CURR)
3226 #define __kmp_thread_calloc(th, nelem, elsize) \
3227  ___kmp_thread_calloc((th), (nelem), (elsize)KMP_SRC_LOC_CURR)
3228 #define __kmp_thread_realloc(th, ptr, size) \
3229  ___kmp_thread_realloc((th), (ptr), (size)KMP_SRC_LOC_CURR)
3230 #define __kmp_thread_free(th, ptr) \
3231  ___kmp_thread_free((th), (ptr)KMP_SRC_LOC_CURR)
3232 
3233 #define KMP_INTERNAL_MALLOC(sz) malloc(sz)
3234 #define KMP_INTERNAL_FREE(p) free(p)
3235 #define KMP_INTERNAL_REALLOC(p, sz) realloc((p), (sz))
3236 #define KMP_INTERNAL_CALLOC(n, sz) calloc((n), (sz))
3237 
3238 extern void __kmp_push_num_threads(ident_t *loc, int gtid, int num_threads);
3239 
3240 #if OMP_40_ENABLED
3241 extern void __kmp_push_proc_bind(ident_t *loc, int gtid,
3242  kmp_proc_bind_t proc_bind);
3243 extern void __kmp_push_num_teams(ident_t *loc, int gtid, int num_teams,
3244  int num_threads);
3245 #endif
3246 
3247 extern void __kmp_yield(int cond);
3248 
3249 extern void __kmpc_dispatch_init_4(ident_t *loc, kmp_int32 gtid,
3250  enum sched_type schedule, kmp_int32 lb,
3251  kmp_int32 ub, kmp_int32 st, kmp_int32 chunk);
3252 extern void __kmpc_dispatch_init_4u(ident_t *loc, kmp_int32 gtid,
3253  enum sched_type schedule, kmp_uint32 lb,
3254  kmp_uint32 ub, kmp_int32 st,
3255  kmp_int32 chunk);
3256 extern void __kmpc_dispatch_init_8(ident_t *loc, kmp_int32 gtid,
3257  enum sched_type schedule, kmp_int64 lb,
3258  kmp_int64 ub, kmp_int64 st, kmp_int64 chunk);
3259 extern void __kmpc_dispatch_init_8u(ident_t *loc, kmp_int32 gtid,
3260  enum sched_type schedule, kmp_uint64 lb,
3261  kmp_uint64 ub, kmp_int64 st,
3262  kmp_int64 chunk);
3263 
3264 extern int __kmpc_dispatch_next_4(ident_t *loc, kmp_int32 gtid,
3265  kmp_int32 *p_last, kmp_int32 *p_lb,
3266  kmp_int32 *p_ub, kmp_int32 *p_st);
3267 extern int __kmpc_dispatch_next_4u(ident_t *loc, kmp_int32 gtid,
3268  kmp_int32 *p_last, kmp_uint32 *p_lb,
3269  kmp_uint32 *p_ub, kmp_int32 *p_st);
3270 extern int __kmpc_dispatch_next_8(ident_t *loc, kmp_int32 gtid,
3271  kmp_int32 *p_last, kmp_int64 *p_lb,
3272  kmp_int64 *p_ub, kmp_int64 *p_st);
3273 extern int __kmpc_dispatch_next_8u(ident_t *loc, kmp_int32 gtid,
3274  kmp_int32 *p_last, kmp_uint64 *p_lb,
3275  kmp_uint64 *p_ub, kmp_int64 *p_st);
3276 
3277 extern void __kmpc_dispatch_fini_4(ident_t *loc, kmp_int32 gtid);
3278 extern void __kmpc_dispatch_fini_8(ident_t *loc, kmp_int32 gtid);
3279 extern void __kmpc_dispatch_fini_4u(ident_t *loc, kmp_int32 gtid);
3280 extern void __kmpc_dispatch_fini_8u(ident_t *loc, kmp_int32 gtid);
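// Editor's note -- illustrative lowering sketch (not verbatim compiler
// output): a worksharing loop with a dynamic schedule is driven by the
// dispatch_init / dispatch_next entries above.  Assuming an ident_t *loc and
// the caller's gtid are in scope, for a 32-bit signed induction variable:
//
//   kmp_int32 lb, ub, st, last;
//   __kmpc_dispatch_init_4(loc, gtid, kmp_sch_dynamic_chunked,
//                          /*lb=*/0, /*ub=*/N - 1, /*st=*/1, /*chunk=*/C);
//   while (__kmpc_dispatch_next_4(loc, gtid, &last, &lb, &ub, &st)) {
//     for (kmp_int32 i = lb; i <= ub; i += st)
//       body(i);
//   }
//
// The __kmpc_dispatch_fini_* entries are the matching finalization hooks
// (emitted, for example, for loops with an ordered clause); a plain dynamic
// loop simply ends when next() returns zero.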
3281 
3282 #ifdef KMP_GOMP_COMPAT
3283 
3284 extern void __kmp_aux_dispatch_init_4(ident_t *loc, kmp_int32 gtid,
3285  enum sched_type schedule, kmp_int32 lb,
3286  kmp_int32 ub, kmp_int32 st,
3287  kmp_int32 chunk, int push_ws);
3288 extern void __kmp_aux_dispatch_init_4u(ident_t *loc, kmp_int32 gtid,
3289  enum sched_type schedule, kmp_uint32 lb,
3290  kmp_uint32 ub, kmp_int32 st,
3291  kmp_int32 chunk, int push_ws);
3292 extern void __kmp_aux_dispatch_init_8(ident_t *loc, kmp_int32 gtid,
3293  enum sched_type schedule, kmp_int64 lb,
3294  kmp_int64 ub, kmp_int64 st,
3295  kmp_int64 chunk, int push_ws);
3296 extern void __kmp_aux_dispatch_init_8u(ident_t *loc, kmp_int32 gtid,
3297  enum sched_type schedule, kmp_uint64 lb,
3298  kmp_uint64 ub, kmp_int64 st,
3299  kmp_int64 chunk, int push_ws);
3300 extern void __kmp_aux_dispatch_fini_chunk_4(ident_t *loc, kmp_int32 gtid);
3301 extern void __kmp_aux_dispatch_fini_chunk_8(ident_t *loc, kmp_int32 gtid);
3302 extern void __kmp_aux_dispatch_fini_chunk_4u(ident_t *loc, kmp_int32 gtid);
3303 extern void __kmp_aux_dispatch_fini_chunk_8u(ident_t *loc, kmp_int32 gtid);
3304 
3305 #endif /* KMP_GOMP_COMPAT */
3306 
3307 extern kmp_uint32 __kmp_eq_4(kmp_uint32 value, kmp_uint32 checker);
3308 extern kmp_uint32 __kmp_neq_4(kmp_uint32 value, kmp_uint32 checker);
3309 extern kmp_uint32 __kmp_lt_4(kmp_uint32 value, kmp_uint32 checker);
3310 extern kmp_uint32 __kmp_ge_4(kmp_uint32 value, kmp_uint32 checker);
3311 extern kmp_uint32 __kmp_le_4(kmp_uint32 value, kmp_uint32 checker);
3312 extern kmp_uint32 __kmp_wait_yield_4(kmp_uint32 volatile *spinner,
3313  kmp_uint32 checker,
3314  kmp_uint32 (*pred)(kmp_uint32, kmp_uint32),
3315  void *obj);
3316 extern void __kmp_wait_yield_4_ptr(void *spinner, kmp_uint32 checker,
3317  kmp_uint32 (*pred)(void *, kmp_uint32),
3318  void *obj);
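// Editor's note -- usage sketch: the small comparison helpers above pair with
// __kmp_wait_yield_4 to spin (yielding as configured) until a predicate on a
// 32-bit flag holds, e.g. waiting for a flag to become 1:
//
//   volatile kmp_uint32 flag = 0;
//   /* ... another thread eventually stores 1 ... */
//   __kmp_wait_yield_4(&flag, /*checker=*/1, __kmp_eq_4, /*obj=*/NULL);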
3319 
3320 class kmp_flag_32;
3321 class kmp_flag_64;
3322 class kmp_flag_oncore;
3323 extern void __kmp_wait_64(kmp_info_t *this_thr, kmp_flag_64 *flag,
3324  int final_spin
3325 #if USE_ITT_BUILD
3326  ,
3327  void *itt_sync_obj
3328 #endif
3329  );
3330 extern void __kmp_release_64(kmp_flag_64 *flag);
3331 
3332 extern void __kmp_infinite_loop(void);
3333 
3334 extern void __kmp_cleanup(void);
3335 
3336 #if KMP_HANDLE_SIGNALS
3337 extern int __kmp_handle_signals;
3338 extern void __kmp_install_signals(int parallel_init);
3339 extern void __kmp_remove_signals(void);
3340 #endif
3341 
3342 extern void __kmp_clear_system_time(void);
3343 extern void __kmp_read_system_time(double *delta);
3344 
3345 extern void __kmp_check_stack_overlap(kmp_info_t *thr);
3346 
3347 extern void __kmp_expand_host_name(char *buffer, size_t size);
3348 extern void __kmp_expand_file_name(char *result, size_t rlen, char *pattern);
3349 
3350 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
3351 extern void
3352 __kmp_initialize_system_tick(void); /* Initialize timer tick value */
3353 #endif
3354 
3355 extern void
3356 __kmp_runtime_initialize(void); /* machine specific initialization */
3357 extern void __kmp_runtime_destroy(void);
3358 
3359 #if KMP_AFFINITY_SUPPORTED
3360 extern char *__kmp_affinity_print_mask(char *buf, int buf_len,
3361  kmp_affin_mask_t *mask);
3362 extern void __kmp_affinity_initialize(void);
3363 extern void __kmp_affinity_uninitialize(void);
3364 extern void __kmp_affinity_set_init_mask(
3365  int gtid, int isa_root); /* set affinity according to KMP_AFFINITY */
3366 #if OMP_40_ENABLED
3367 extern void __kmp_affinity_set_place(int gtid);
3368 #endif
3369 extern void __kmp_affinity_determine_capable(const char *env_var);
3370 extern int __kmp_aux_set_affinity(void **mask);
3371 extern int __kmp_aux_get_affinity(void **mask);
3372 extern int __kmp_aux_get_affinity_max_proc();
3373 extern int __kmp_aux_set_affinity_mask_proc(int proc, void **mask);
3374 extern int __kmp_aux_unset_affinity_mask_proc(int proc, void **mask);
3375 extern int __kmp_aux_get_affinity_mask_proc(int proc, void **mask);
3376 extern void __kmp_balanced_affinity(kmp_info_t *th, int team_size);
3377 #if KMP_OS_LINUX
3378 extern int kmp_set_thread_affinity_mask_initial(void);
3379 #endif
3380 #endif /* KMP_AFFINITY_SUPPORTED */
3381 
3382 extern void __kmp_cleanup_hierarchy();
3383 extern void __kmp_get_hierarchy(kmp_uint32 nproc, kmp_bstate_t *thr_bar);
3384 
3385 #if KMP_USE_FUTEX
3386 
3387 extern int __kmp_futex_determine_capable(void);
3388 
3389 #endif // KMP_USE_FUTEX
3390 
3391 extern void __kmp_gtid_set_specific(int gtid);
3392 extern int __kmp_gtid_get_specific(void);
3393 
3394 extern double __kmp_read_cpu_time(void);
3395 
3396 extern int __kmp_read_system_info(struct kmp_sys_info *info);
3397 
3398 #if KMP_USE_MONITOR
3399 extern void __kmp_create_monitor(kmp_info_t *th);
3400 #endif
3401 
3402 extern void *__kmp_launch_thread(kmp_info_t *thr);
3403 
3404 extern void __kmp_create_worker(int gtid, kmp_info_t *th, size_t stack_size);
3405 
3406 #if KMP_OS_WINDOWS
3407 extern int __kmp_still_running(kmp_info_t *th);
3408 extern int __kmp_is_thread_alive(kmp_info_t *th, DWORD *exit_val);
3409 extern void __kmp_free_handle(kmp_thread_t tHandle);
3410 #endif
3411 
3412 #if KMP_USE_MONITOR
3413 extern void __kmp_reap_monitor(kmp_info_t *th);
3414 #endif
3415 extern void __kmp_reap_worker(kmp_info_t *th);
3416 extern void __kmp_terminate_thread(int gtid);
3417 
3418 extern void __kmp_suspend_32(int th_gtid, kmp_flag_32 *flag);
3419 extern void __kmp_suspend_64(int th_gtid, kmp_flag_64 *flag);
3420 extern void __kmp_suspend_oncore(int th_gtid, kmp_flag_oncore *flag);
3421 extern void __kmp_resume_32(int target_gtid, kmp_flag_32 *flag);
3422 extern void __kmp_resume_64(int target_gtid, kmp_flag_64 *flag);
3423 extern void __kmp_resume_oncore(int target_gtid, kmp_flag_oncore *flag);
3424 
3425 extern void __kmp_elapsed(double *);
3426 extern void __kmp_elapsed_tick(double *);
3427 
3428 extern void __kmp_enable(int old_state);
3429 extern void __kmp_disable(int *old_state);
3430 
3431 extern void __kmp_thread_sleep(int millis);
3432 
3433 extern void __kmp_common_initialize(void);
3434 extern void __kmp_common_destroy(void);
3435 extern void __kmp_common_destroy_gtid(int gtid);
3436 
3437 #if KMP_OS_UNIX
3438 extern void __kmp_register_atfork(void);
3439 #endif
3440 extern void __kmp_suspend_initialize(void);
3441 extern void __kmp_suspend_uninitialize_thread(kmp_info_t *th);
3442 
3443 extern kmp_info_t *__kmp_allocate_thread(kmp_root_t *root, kmp_team_t *team,
3444  int tid);
3445 #if OMP_40_ENABLED
3446 extern kmp_team_t *
3447 __kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
3448 #if OMPT_SUPPORT
3449  ompt_data_t ompt_parallel_data,
3450 #endif
3451  kmp_proc_bind_t proc_bind, kmp_internal_control_t *new_icvs,
3452  int argc USE_NESTED_HOT_ARG(kmp_info_t *thr));
3453 #else
3454 extern kmp_team_t *
3455 __kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
3456 #if OMPT_SUPPORT
3457  ompt_id_t ompt_parallel_id,
3458 #endif
3459  kmp_internal_control_t *new_icvs,
3460  int argc USE_NESTED_HOT_ARG(kmp_info_t *thr));
3461 #endif // OMP_40_ENABLED
3462 extern void __kmp_free_thread(kmp_info_t *);
3463 extern void __kmp_free_team(kmp_root_t *,
3464  kmp_team_t *USE_NESTED_HOT_ARG(kmp_info_t *));
3465 extern kmp_team_t *__kmp_reap_team(kmp_team_t *);
3466 
3467 /* ------------------------------------------------------------------------ */
3468 
3469 extern void __kmp_initialize_bget(kmp_info_t *th);
3470 extern void __kmp_finalize_bget(kmp_info_t *th);
3471 
3472 KMP_EXPORT void *kmpc_malloc(size_t size);
3473 KMP_EXPORT void *kmpc_aligned_malloc(size_t size, size_t alignment);
3474 KMP_EXPORT void *kmpc_calloc(size_t nelem, size_t elsize);
3475 KMP_EXPORT void *kmpc_realloc(void *ptr, size_t size);
3476 KMP_EXPORT void kmpc_free(void *ptr);
3477 
3478 /* declarations for internal use */
3479 
3480 extern int __kmp_barrier(enum barrier_type bt, int gtid, int is_split,
3481  size_t reduce_size, void *reduce_data,
3482  void (*reduce)(void *, void *));
3483 extern void __kmp_end_split_barrier(enum barrier_type bt, int gtid);
3484 
3489 enum fork_context_e {
3490  fork_context_gnu,
3492  fork_context_intel,
3493  fork_context_last
3494 };
3495 extern int __kmp_fork_call(ident_t *loc, int gtid,
3496  enum fork_context_e fork_context, kmp_int32 argc,
3497  microtask_t microtask, launch_t invoker,
3498 /* TODO: revert workaround for Intel(R) 64 tracker #96 */
3499 #if (KMP_ARCH_ARM || KMP_ARCH_X86_64 || KMP_ARCH_AARCH64) && KMP_OS_LINUX
3500  va_list *ap
3501 #else
3502  va_list ap
3503 #endif
3504  );
3505 
3506 extern void __kmp_join_call(ident_t *loc, int gtid
3507 #if OMPT_SUPPORT
3508  ,
3509  enum fork_context_e fork_context
3510 #endif
3511 #if OMP_40_ENABLED
3512  ,
3513  int exit_teams = 0
3514 #endif
3515  );
3516 
3517 extern void __kmp_serialized_parallel(ident_t *id, kmp_int32 gtid);
3518 extern void __kmp_internal_fork(ident_t *id, int gtid, kmp_team_t *team);
3519 extern void __kmp_internal_join(ident_t *id, int gtid, kmp_team_t *team);
3520 extern int __kmp_invoke_task_func(int gtid);
3521 extern void __kmp_run_before_invoked_task(int gtid, int tid,
3522  kmp_info_t *this_thr,
3523  kmp_team_t *team);
3524 extern void __kmp_run_after_invoked_task(int gtid, int tid,
3525  kmp_info_t *this_thr,
3526  kmp_team_t *team);
3527 
3528 // should never have been exported
3529 KMP_EXPORT int __kmpc_invoke_task_func(int gtid);
3530 #if OMP_40_ENABLED
3531 extern int __kmp_invoke_teams_master(int gtid);
3532 extern void __kmp_teams_master(int gtid);
3533 #endif
3534 extern void __kmp_save_internal_controls(kmp_info_t *thread);
3535 extern void __kmp_user_set_library(enum library_type arg);
3536 extern void __kmp_aux_set_library(enum library_type arg);
3537 extern void __kmp_aux_set_stacksize(size_t arg);
3538 extern void __kmp_aux_set_blocktime(int arg, kmp_info_t *thread, int tid);
3539 extern void __kmp_aux_set_defaults(char const *str, int len);
3540 
3541 /* Functions called from __kmp_aux_env_initialize() in kmp_settings.cpp */
3542 void kmpc_set_blocktime(int arg);
3543 void ompc_set_nested(int flag);
3544 void ompc_set_dynamic(int flag);
3545 void ompc_set_num_threads(int arg);
3546 
3547 extern void __kmp_push_current_task_to_thread(kmp_info_t *this_thr,
3548  kmp_team_t *team, int tid);
3549 extern void __kmp_pop_current_task_from_thread(kmp_info_t *this_thr);
3550 extern kmp_task_t *__kmp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
3551  kmp_tasking_flags_t *flags,
3552  size_t sizeof_kmp_task_t,
3553  size_t sizeof_shareds,
3554  kmp_routine_entry_t task_entry);
3555 extern void __kmp_init_implicit_task(ident_t *loc_ref, kmp_info_t *this_thr,
3556  kmp_team_t *team, int tid,
3557  int set_curr_task);
3558 extern void __kmp_finish_implicit_task(kmp_info_t *this_thr);
3559 extern void __kmp_free_implicit_task(kmp_info_t *this_thr);
3560 int __kmp_execute_tasks_32(kmp_info_t *thread, kmp_int32 gtid,
3561  kmp_flag_32 *flag, int final_spin,
3562  int *thread_finished,
3563 #if USE_ITT_BUILD
3564  void *itt_sync_obj,
3565 #endif /* USE_ITT_BUILD */
3566  kmp_int32 is_constrained);
3567 int __kmp_execute_tasks_64(kmp_info_t *thread, kmp_int32 gtid,
3568  kmp_flag_64 *flag, int final_spin,
3569  int *thread_finished,
3570 #if USE_ITT_BUILD
3571  void *itt_sync_obj,
3572 #endif /* USE_ITT_BUILD */
3573  kmp_int32 is_constrained);
3574 int __kmp_execute_tasks_oncore(kmp_info_t *thread, kmp_int32 gtid,
3575  kmp_flag_oncore *flag, int final_spin,
3576  int *thread_finished,
3577 #if USE_ITT_BUILD
3578  void *itt_sync_obj,
3579 #endif /* USE_ITT_BUILD */
3580  kmp_int32 is_constrained);
3581 
3582 extern void __kmp_free_task_team(kmp_info_t *thread,
3583  kmp_task_team_t *task_team);
3584 extern void __kmp_reap_task_teams(void);
3585 extern void __kmp_wait_to_unref_task_teams(void);
3586 extern void __kmp_task_team_setup(kmp_info_t *this_thr, kmp_team_t *team,
3587  int always);
3588 extern void __kmp_task_team_sync(kmp_info_t *this_thr, kmp_team_t *team);
3589 extern void __kmp_task_team_wait(kmp_info_t *this_thr, kmp_team_t *team
3590 #if USE_ITT_BUILD
3591  ,
3592  void *itt_sync_obj
3593 #endif /* USE_ITT_BUILD */
3594  ,
3595  int wait = 1);
3596 extern void __kmp_tasking_barrier(kmp_team_t *team, kmp_info_t *thread,
3597  int gtid);
3598 
3599 extern int __kmp_is_address_mapped(void *addr);
3600 extern kmp_uint64 __kmp_hardware_timestamp(void);
3601 
3602 #if KMP_OS_UNIX
3603 extern int __kmp_read_from_file(char const *path, char const *format, ...);
3604 #endif
3605 
3606 /* ------------------------------------------------------------------------ */
3607 //
3608 // Assembly routines that have no compiler intrinsic replacement
3609 //
3610 
3611 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
3612 
3613 extern void __kmp_query_cpuid(kmp_cpuinfo_t *p);
3614 
3615 #define __kmp_load_mxcsr(p) _mm_setcsr(*(p))
3616 static inline void __kmp_store_mxcsr(kmp_uint32 *p) { *p = _mm_getcsr(); }
3617 
3618 extern void __kmp_load_x87_fpu_control_word(kmp_int16 *p);
3619 extern void __kmp_store_x87_fpu_control_word(kmp_int16 *p);
3620 extern void __kmp_clear_x87_fpu_status_word();
3621 #define KMP_X86_MXCSR_MASK 0xffffffc0 /* ignore status flags (6 lsb) */
3622 
3623 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
3624 
3625 extern int __kmp_invoke_microtask(microtask_t pkfn, int gtid, int npr, int argc,
3626  void *argv[]
3627 #if OMPT_SUPPORT
3628  ,
3629  void **exit_frame_ptr
3630 #endif
3631  );
3632 
3633 /* ------------------------------------------------------------------------ */
3634 
3635 KMP_EXPORT void __kmpc_begin(ident_t *, kmp_int32 flags);
3636 KMP_EXPORT void __kmpc_end(ident_t *);
3637 
3638 KMP_EXPORT void __kmpc_threadprivate_register_vec(ident_t *, void *data,
3639  kmpc_ctor_vec ctor,
3640  kmpc_cctor_vec cctor,
3641  kmpc_dtor_vec dtor,
3642  size_t vector_length);
3643 KMP_EXPORT void __kmpc_threadprivate_register(ident_t *, void *data,
3644  kmpc_ctor ctor, kmpc_cctor cctor,
3645  kmpc_dtor dtor);
3646 KMP_EXPORT void *__kmpc_threadprivate(ident_t *, kmp_int32 global_tid,
3647  void *data, size_t size);
3648 
3649 KMP_EXPORT kmp_int32 __kmpc_global_thread_num(ident_t *);
3650 KMP_EXPORT kmp_int32 __kmpc_global_num_threads(ident_t *);
3651 KMP_EXPORT kmp_int32 __kmpc_bound_thread_num(ident_t *);
3652 KMP_EXPORT kmp_int32 __kmpc_bound_num_threads(ident_t *);
3653 
3654 KMP_EXPORT kmp_int32 __kmpc_ok_to_fork(ident_t *);
3655 KMP_EXPORT void __kmpc_fork_call(ident_t *, kmp_int32 nargs,
3656  kmpc_micro microtask, ...);
3657 
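// Editor's note -- illustrative lowering sketch: __kmpc_fork_call is the
// entry point emitted for "#pragma omp parallel".  The region body is
// outlined into a microtask that receives the global/bound tid pointers
// first and the shared variables as trailing arguments:
//
//   static void outlined_body(kmp_int32 *gtid, kmp_int32 *btid, int *x) {
//     /* ... body of the parallel region ... */
//   }
//   /* ... with an ident_t loc describing the source location ... */
//   int x = 0;
//   __kmpc_fork_call(&loc, /*nargs=*/1, (kmpc_micro)outlined_body, &x);
//
// The serialized_parallel pair below is the fallback used when the region
// runs on a single thread.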
3658 KMP_EXPORT void __kmpc_serialized_parallel(ident_t *, kmp_int32 global_tid);
3659 KMP_EXPORT void __kmpc_end_serialized_parallel(ident_t *, kmp_int32 global_tid);
3660 
3661 KMP_EXPORT void __kmpc_flush(ident_t *);
3662 KMP_EXPORT void __kmpc_barrier(ident_t *, kmp_int32 global_tid);
3663 KMP_EXPORT kmp_int32 __kmpc_master(ident_t *, kmp_int32 global_tid);
3664 KMP_EXPORT void __kmpc_end_master(ident_t *, kmp_int32 global_tid);
3665 KMP_EXPORT void __kmpc_ordered(ident_t *, kmp_int32 global_tid);
3666 KMP_EXPORT void __kmpc_end_ordered(ident_t *, kmp_int32 global_tid);
3667 KMP_EXPORT void __kmpc_critical(ident_t *, kmp_int32 global_tid,
3668  kmp_critical_name *);
3669 KMP_EXPORT void __kmpc_end_critical(ident_t *, kmp_int32 global_tid,
3670  kmp_critical_name *);
3671 
3672 #if OMP_45_ENABLED
3673 KMP_EXPORT void __kmpc_critical_with_hint(ident_t *, kmp_int32 global_tid,
3674  kmp_critical_name *, uint32_t hint);
3675 #endif
3676 
3677 KMP_EXPORT kmp_int32 __kmpc_barrier_master(ident_t *, kmp_int32 global_tid);
3678 KMP_EXPORT void __kmpc_end_barrier_master(ident_t *, kmp_int32 global_tid);
3679 
3680 KMP_EXPORT kmp_int32 __kmpc_barrier_master_nowait(ident_t *,
3681  kmp_int32 global_tid);
3682 
3683 KMP_EXPORT kmp_int32 __kmpc_single(ident_t *, kmp_int32 global_tid);
3684 KMP_EXPORT void __kmpc_end_single(ident_t *, kmp_int32 global_tid);
3685 
3686 KMP_EXPORT void KMPC_FOR_STATIC_INIT(ident_t *loc, kmp_int32 global_tid,
3687  kmp_int32 schedtype, kmp_int32 *plastiter,
3688  kmp_int *plower, kmp_int *pupper,
3689  kmp_int *pstride, kmp_int incr,
3690  kmp_int chunk);
3691 
3692 KMP_EXPORT void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
3693 
3694 KMP_EXPORT void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
3695  size_t cpy_size, void *cpy_data,
3696  void (*cpy_func)(void *, void *),
3697  kmp_int32 didit);
3698 
3699 extern void KMPC_SET_NUM_THREADS(int arg);
3700 extern void KMPC_SET_DYNAMIC(int flag);
3701 extern void KMPC_SET_NESTED(int flag);
3702 
3703 /* Taskq interface routines */
3704 KMP_EXPORT kmpc_thunk_t *__kmpc_taskq(ident_t *loc, kmp_int32 global_tid,
3705  kmpc_task_t taskq_task,
3706  size_t sizeof_thunk,
3707  size_t sizeof_shareds, kmp_int32 flags,
3708  kmpc_shared_vars_t **shareds);
3709 KMP_EXPORT void __kmpc_end_taskq(ident_t *loc, kmp_int32 global_tid,
3710  kmpc_thunk_t *thunk);
3711 KMP_EXPORT kmp_int32 __kmpc_task(ident_t *loc, kmp_int32 global_tid,
3712  kmpc_thunk_t *thunk);
3713 KMP_EXPORT void __kmpc_taskq_task(ident_t *loc, kmp_int32 global_tid,
3714  kmpc_thunk_t *thunk, kmp_int32 status);
3715 KMP_EXPORT void __kmpc_end_taskq_task(ident_t *loc, kmp_int32 global_tid,
3716  kmpc_thunk_t *thunk);
3717 KMP_EXPORT kmpc_thunk_t *__kmpc_task_buffer(ident_t *loc, kmp_int32 global_tid,
3718  kmpc_thunk_t *taskq_thunk,
3719  kmpc_task_t task);
3720 
3721 /* OMP 3.0 tasking interface routines */
3722 KMP_EXPORT kmp_int32 __kmpc_omp_task(ident_t *loc_ref, kmp_int32 gtid,
3723  kmp_task_t *new_task);
3724 KMP_EXPORT kmp_task_t *__kmpc_omp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
3725  kmp_int32 flags,
3726  size_t sizeof_kmp_task_t,
3727  size_t sizeof_shareds,
3728  kmp_routine_entry_t task_entry);
3729 KMP_EXPORT void __kmpc_omp_task_begin_if0(ident_t *loc_ref, kmp_int32 gtid,
3730  kmp_task_t *task);
3731 KMP_EXPORT void __kmpc_omp_task_complete_if0(ident_t *loc_ref, kmp_int32 gtid,
3732  kmp_task_t *task);
3733 KMP_EXPORT kmp_int32 __kmpc_omp_task_parts(ident_t *loc_ref, kmp_int32 gtid,
3734  kmp_task_t *new_task);
3735 KMP_EXPORT kmp_int32 __kmpc_omp_taskwait(ident_t *loc_ref, kmp_int32 gtid);
3736 
3737 KMP_EXPORT kmp_int32 __kmpc_omp_taskyield(ident_t *loc_ref, kmp_int32 gtid,
3738  int end_part);
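// Editor's note -- illustrative lowering sketch: "#pragma omp task" maps onto
// the __kmpc_omp_task_alloc / __kmpc_omp_task pair above.  The task entry
// routine receives the gtid and the task descriptor (shown here with a cast,
// and with sizeof_kmp_task_t kept at its minimum for brevity):
//
//   kmp_int32 task_entry(kmp_int32 gtid, kmp_task_t *t) {
//     /* ... task body, shared data reachable via t->shareds ... */
//     return 0;
//   }
//   /* ... */
//   kmp_task_t *t = __kmpc_omp_task_alloc(&loc, gtid, /*flags=*/1 /* tied */,
//                                         sizeof(kmp_task_t), /*shareds=*/0,
//                                         (kmp_routine_entry_t)task_entry);
//   __kmpc_omp_task(&loc, gtid, t);   // enqueue (or execute) the task
//   __kmpc_omp_taskwait(&loc, gtid);  // wait for child tasks
//
// The *_if0 entries above handle undeferred "if(0)" tasks that must run
// immediately in the encountering thread.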
3739 
3740 #if TASK_UNUSED
3741 void __kmpc_omp_task_begin(ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *task);
3742 void __kmpc_omp_task_complete(ident_t *loc_ref, kmp_int32 gtid,
3743  kmp_task_t *task);
3744 #endif // TASK_UNUSED
3745 
3746 /* ------------------------------------------------------------------------ */
3747 
3748 #if OMP_40_ENABLED
3749 
3750 KMP_EXPORT void __kmpc_taskgroup(ident_t *loc, int gtid);
3751 KMP_EXPORT void __kmpc_end_taskgroup(ident_t *loc, int gtid);
3752 
3753 KMP_EXPORT kmp_int32 __kmpc_omp_task_with_deps(
3754  ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *new_task, kmp_int32 ndeps,
3755  kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
3756  kmp_depend_info_t *noalias_dep_list);
3757 KMP_EXPORT void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32 gtid,
3758  kmp_int32 ndeps,
3759  kmp_depend_info_t *dep_list,
3760  kmp_int32 ndeps_noalias,
3761  kmp_depend_info_t *noalias_dep_list);
3762 
3763 extern kmp_int32 __kmp_omp_task(kmp_int32 gtid, kmp_task_t *new_task,
3764  bool serialize_immediate);
3765 
3766 KMP_EXPORT kmp_int32 __kmpc_cancel(ident_t *loc_ref, kmp_int32 gtid,
3767  kmp_int32 cncl_kind);
3768 KMP_EXPORT kmp_int32 __kmpc_cancellationpoint(ident_t *loc_ref, kmp_int32 gtid,
3769  kmp_int32 cncl_kind);
3770 KMP_EXPORT kmp_int32 __kmpc_cancel_barrier(ident_t *loc_ref, kmp_int32 gtid);
3771 KMP_EXPORT int __kmp_get_cancellation_status(int cancel_kind);
3772 
3773 #if OMP_45_ENABLED
3774 
3775 KMP_EXPORT void __kmpc_proxy_task_completed(kmp_int32 gtid, kmp_task_t *ptask);
3776 KMP_EXPORT void __kmpc_proxy_task_completed_ooo(kmp_task_t *ptask);
3777 KMP_EXPORT void __kmpc_taskloop(ident_t *loc, kmp_int32 gtid, kmp_task_t *task,
3778  kmp_int32 if_val, kmp_uint64 *lb,
3779  kmp_uint64 *ub, kmp_int64 st, kmp_int32 nogroup,
3780  kmp_int32 sched, kmp_uint64 grainsize,
3781  void *task_dup);
3782 #endif
3783 #if OMP_50_ENABLED
3784 KMP_EXPORT void *__kmpc_task_reduction_init(int gtid, int num_data, void *data);
3785 KMP_EXPORT void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void *d);
3786 #endif
3787 
3788 #endif
3789 
3790 /* Lock interface routines (fast versions with gtid passed in) */
3791 KMP_EXPORT void __kmpc_init_lock(ident_t *loc, kmp_int32 gtid,
3792  void **user_lock);
3793 KMP_EXPORT void __kmpc_init_nest_lock(ident_t *loc, kmp_int32 gtid,
3794  void **user_lock);
3795 KMP_EXPORT void __kmpc_destroy_lock(ident_t *loc, kmp_int32 gtid,
3796  void **user_lock);
3797 KMP_EXPORT void __kmpc_destroy_nest_lock(ident_t *loc, kmp_int32 gtid,
3798  void **user_lock);
3799 KMP_EXPORT void __kmpc_set_lock(ident_t *loc, kmp_int32 gtid, void **user_lock);
3800 KMP_EXPORT void __kmpc_set_nest_lock(ident_t *loc, kmp_int32 gtid,
3801  void **user_lock);
3802 KMP_EXPORT void __kmpc_unset_lock(ident_t *loc, kmp_int32 gtid,
3803  void **user_lock);
3804 KMP_EXPORT void __kmpc_unset_nest_lock(ident_t *loc, kmp_int32 gtid,
3805  void **user_lock);
3806 KMP_EXPORT int __kmpc_test_lock(ident_t *loc, kmp_int32 gtid, void **user_lock);
3807 KMP_EXPORT int __kmpc_test_nest_lock(ident_t *loc, kmp_int32 gtid,
3808  void **user_lock);
3809 
3810 #if OMP_45_ENABLED
3811 KMP_EXPORT void __kmpc_init_lock_with_hint(ident_t *loc, kmp_int32 gtid,
3812  void **user_lock, uintptr_t hint);
3813 KMP_EXPORT void __kmpc_init_nest_lock_with_hint(ident_t *loc, kmp_int32 gtid,
3814  void **user_lock,
3815  uintptr_t hint);
3816 #endif
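// Editor's note -- usage sketch: these are the fast, gtid-carrying lock entry
// points (see the comment above); user_lock is an opaque handle owned by the
// runtime.  A minimal sequence, assuming a registered thread:
//
//   void *lck = NULL;
//   __kmpc_init_lock(&loc, gtid, &lck);
//   __kmpc_set_lock(&loc, gtid, &lck);     // acquire
//   /* ... guarded work ... */
//   __kmpc_unset_lock(&loc, gtid, &lck);   // release
//   __kmpc_destroy_lock(&loc, gtid, &lck);
//
// The _nest_ variants back omp_nest_lock_t, and the _with_hint pair adds the
// OpenMP 4.5 lock hints (e.g. requesting speculative acquisition).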
3817 
3818 /* Interface to fast scalable reduce methods routines */
3819 
3820 KMP_EXPORT kmp_int32 __kmpc_reduce_nowait(
3821  ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size,
3822  void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
3823  kmp_critical_name *lck);
3824 KMP_EXPORT void __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
3825  kmp_critical_name *lck);
3826 KMP_EXPORT kmp_int32 __kmpc_reduce(
3827  ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size,
3828  void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
3829  kmp_critical_name *lck);
3830 KMP_EXPORT void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
3831  kmp_critical_name *lck);
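// Editor's note -- illustrative lowering sketch: for reduction(+:sum) the
// compiler packs pointers to the thread-local partials into reduce_data and
// lets the runtime choose a method.  The nowait form returns 1 when the
// caller should combine the data itself (and then call
// __kmpc_end_reduce_nowait), 2 when it should update the shared variable
// atomically, and 0 when it has nothing to do:
//
//   void reduce_func(void *lhs, void *rhs) { /* *(int*)lhs += *(int*)rhs; */ }
//   static kmp_critical_name crit;  // zero-initialized lock tag
//   /* ... */
//   void *rdata[] = {&my_sum};
//   switch (__kmpc_reduce_nowait(&loc, gtid, /*num_vars=*/1, sizeof(rdata),
//                                rdata, reduce_func, &crit)) {
//   case 1: /* combine into the shared variable */
//           __kmpc_end_reduce_nowait(&loc, gtid, &crit); break;
//   case 2: /* update the shared variable atomically */  break;
//   default: break;
//   }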
3832 
3833 /* Internal fast reduction routines */
3834 
3835 extern PACKED_REDUCTION_METHOD_T __kmp_determine_reduction_method(
3836  ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size,
3837  void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
3838  kmp_critical_name *lck);
3839 
3840 // This function is exported only for testing the set/get/determine reduce method.
3841 KMP_EXPORT kmp_int32 __kmp_get_reduce_method(void);
3842 
3843 KMP_EXPORT kmp_uint64 __kmpc_get_taskid();
3844 KMP_EXPORT kmp_uint64 __kmpc_get_parent_taskid();
3845 
3846 // C++ port
3847 // missing 'extern "C"' declarations
3848 
3849 KMP_EXPORT kmp_int32 __kmpc_in_parallel(ident_t *loc);
3850 KMP_EXPORT void __kmpc_pop_num_threads(ident_t *loc, kmp_int32 global_tid);
3851 KMP_EXPORT void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
3852  kmp_int32 num_threads);
3853 
3854 #if OMP_40_ENABLED
3855 KMP_EXPORT void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
3856  int proc_bind);
3857 KMP_EXPORT void __kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid,
3858  kmp_int32 num_teams,
3859  kmp_int32 num_threads);
3860 KMP_EXPORT void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc,
3861  kmpc_micro microtask, ...);
3862 #endif
3863 #if OMP_45_ENABLED
3864 struct kmp_dim { // loop bounds info cast to kmp_int64
3865  kmp_int64 lo; // lower
3866  kmp_int64 up; // upper
3867  kmp_int64 st; // stride
3868 };
3869 KMP_EXPORT void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid,
3870  kmp_int32 num_dims,
3871  const struct kmp_dim *dims);
3872 KMP_EXPORT void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid,
3873  const kmp_int64 *vec);
3874 KMP_EXPORT void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid,
3875  const kmp_int64 *vec);
3876 KMP_EXPORT void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid);
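// Editor's note -- illustrative lowering sketch: doacross ("ordered(n)")
// loops use the four entries above.  Each loop dimension is described by a
// kmp_dim, and iteration vectors are posted/waited on inside the loop:
//
//   struct kmp_dim dims[1] = {{/*lo=*/0, /*up=*/N - 1, /*st=*/1}};
//   __kmpc_doacross_init(&loc, gtid, /*num_dims=*/1, dims);
//   for (kmp_int64 i = lb; i <= ub; ++i) {  // this thread's iterations
//     kmp_int64 dep = i - 1;
//     if (i > 0)
//       __kmpc_doacross_wait(&loc, gtid, &dep);  // depend(sink: i-1)
//     body(i);
//     __kmpc_doacross_post(&loc, gtid, &i);      // depend(source)
//   }
//   __kmpc_doacross_fini(&loc, gtid);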
3877 #endif
3878 
3879 KMP_EXPORT void *__kmpc_threadprivate_cached(ident_t *loc, kmp_int32 global_tid,
3880  void *data, size_t size,
3881  void ***cache);
3882 
3883 // Guard symbols for mutual detection of multiple OpenMP libraries (incl. MS).
3884 extern int _You_must_link_with_exactly_one_OpenMP_library;
3885 extern int _You_must_link_with_Intel_OpenMP_library;
3886 #if KMP_OS_WINDOWS && (KMP_VERSION_MAJOR > 4)
3887 extern int _You_must_link_with_Microsoft_OpenMP_library;
3888 #endif
3889 
3890 // The routines below are not exported.
3891 // Consider making them 'static' in corresponding source files.
3892 void kmp_threadprivate_insert_private_data(int gtid, void *pc_addr,
3893  void *data_addr, size_t pc_size);
3894 struct private_common *kmp_threadprivate_insert(int gtid, void *pc_addr,
3895  void *data_addr,
3896  size_t pc_size);
3897 void __kmp_threadprivate_resize_cache(int newCapacity);
3898 void __kmp_cleanup_threadprivate_caches();
3899 
3900 // ompc_, kmpc_ entries moved from omp.h.
3901 #if KMP_OS_WINDOWS
3902 #define KMPC_CONVENTION __cdecl
3903 #else
3904 #define KMPC_CONVENTION
3905 #endif
3906 
3907 #ifndef __OMP_H
3908 typedef enum omp_sched_t {
3909  omp_sched_static = 1,
3910  omp_sched_dynamic = 2,
3911  omp_sched_guided = 3,
3912  omp_sched_auto = 4
3913 } omp_sched_t;
3914 typedef void *kmp_affinity_mask_t;
3915 #endif
3916 
3917 KMP_EXPORT void KMPC_CONVENTION ompc_set_max_active_levels(int);
3918 KMP_EXPORT void KMPC_CONVENTION ompc_set_schedule(omp_sched_t, int);
3919 KMP_EXPORT int KMPC_CONVENTION ompc_get_ancestor_thread_num(int);
3920 KMP_EXPORT int KMPC_CONVENTION ompc_get_team_size(int);
3921 KMP_EXPORT int KMPC_CONVENTION
3922 kmpc_set_affinity_mask_proc(int, kmp_affinity_mask_t *);
3923 KMP_EXPORT int KMPC_CONVENTION
3924 kmpc_unset_affinity_mask_proc(int, kmp_affinity_mask_t *);
3925 KMP_EXPORT int KMPC_CONVENTION
3926 kmpc_get_affinity_mask_proc(int, kmp_affinity_mask_t *);
3927 
3928 KMP_EXPORT void KMPC_CONVENTION kmpc_set_stacksize(int);
3929 KMP_EXPORT void KMPC_CONVENTION kmpc_set_stacksize_s(size_t);
3930 KMP_EXPORT void KMPC_CONVENTION kmpc_set_library(int);
3931 KMP_EXPORT void KMPC_CONVENTION kmpc_set_defaults(char const *);
3932 KMP_EXPORT void KMPC_CONVENTION kmpc_set_disp_num_buffers(int);
3933 
3934 #if OMP_50_ENABLED
3935 enum kmp_target_offload_kind {
3936  tgt_disabled = 0,
3937  tgt_default = 1,
3938  tgt_mandatory = 2
3939 };
3940 typedef enum kmp_target_offload_kind kmp_target_offload_kind_t;
3941 // Set via OMP_TARGET_OFFLOAD if specified, defaults to tgt_default otherwise
3942 extern kmp_target_offload_kind_t __kmp_target_offload;
3943 extern int __kmpc_get_target_offload();
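// Editor's note -- hedged sketch: OMP_TARGET_OFFLOAD accepts MANDATORY,
// DISABLED or DEFAULT (OpenMP 5.0).  A sketch of the mapping the settings
// code has to perform (helper name hypothetical, POSIX strcasecmp used for
// brevity):
//
//   static kmp_target_offload_kind_t example_parse_offload(const char *s) {
//     if (!strcasecmp(s, "mandatory")) return tgt_mandatory;
//     if (!strcasecmp(s, "disabled"))  return tgt_disabled;
//     return tgt_default;
//   }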
3944 #endif
3945 
3946 #ifdef __cplusplus
3947 }
3948 #endif
3949 
3950 #endif /* KMP_H */