LLVM OpenMP* Runtime Library
kmp_tasking.cpp
1 /*
2  * kmp_tasking.cpp -- OpenMP 3.0 tasking support.
3  */
4 
5 //===----------------------------------------------------------------------===//
6 //
7 // The LLVM Compiler Infrastructure
8 //
9 // This file is dual licensed under the MIT and the University of Illinois Open
10 // Source Licenses. See LICENSE.txt for details.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "kmp.h"
15 #include "kmp_i18n.h"
16 #include "kmp_itt.h"
17 #include "kmp_stats.h"
18 #include "kmp_wait_release.h"
19 #include "kmp_taskdeps.h"
20 
21 #if OMPT_SUPPORT
22 #include "ompt-specific.h"
23 #endif
24 
25 #include "tsan_annotations.h"
26 
27 /* forward declaration */
28 static void __kmp_enable_tasking(kmp_task_team_t *task_team,
29  kmp_info_t *this_thr);
30 static void __kmp_alloc_task_deque(kmp_info_t *thread,
31  kmp_thread_data_t *thread_data);
32 static int __kmp_realloc_task_threads_data(kmp_info_t *thread,
33  kmp_task_team_t *task_team);
34 
35 #if OMP_45_ENABLED
36 static void __kmp_bottom_half_finish_proxy(kmp_int32 gtid, kmp_task_t *ptask);
37 #endif
38 
39 #ifdef BUILD_TIED_TASK_STACK
40 
41 // __kmp_trace_task_stack: print the tied tasks from the task stack in order
42 // from top to bottom
43 //
44 // gtid: global thread identifier for thread containing stack
45 // thread_data: thread data for task team thread containing stack
46 // threshold: value above which the trace statement triggers
47 // location: string identifying call site of this function (for trace)
48 static void __kmp_trace_task_stack(kmp_int32 gtid,
49  kmp_thread_data_t *thread_data,
50  int threshold, char *location) {
51  kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
52  kmp_taskdata_t **stack_top = task_stack->ts_top;
53  kmp_int32 entries = task_stack->ts_entries;
54  kmp_taskdata_t *tied_task;
55 
56  KA_TRACE(
57  threshold,
58  ("__kmp_trace_task_stack(start): location = %s, gtid = %d, entries = %d, "
59  "first_block = %p, stack_top = %p \n",
60  location, gtid, entries, task_stack->ts_first_block, stack_top));
61 
62  KMP_DEBUG_ASSERT(stack_top != NULL);
63  KMP_DEBUG_ASSERT(entries > 0);
64 
65  while (entries != 0) {
66  KMP_DEBUG_ASSERT(stack_top != &task_stack->ts_first_block.sb_block[0]);
67  // fix up ts_top if we need to pop from previous block
68  if ((entries & TASK_STACK_INDEX_MASK) == 0) {
69  kmp_stack_block_t *stack_block = (kmp_stack_block_t *)(stack_top);
70 
71  stack_block = stack_block->sb_prev;
72  stack_top = &stack_block->sb_block[TASK_STACK_BLOCK_SIZE];
73  }
74 
75  // finish bookkeeping
76  stack_top--;
77  entries--;
78 
79  tied_task = *stack_top;
80 
81  KMP_DEBUG_ASSERT(tied_task != NULL);
82  KMP_DEBUG_ASSERT(tied_task->td_flags.tasktype == TASK_TIED);
83 
84  KA_TRACE(threshold,
85  ("__kmp_trace_task_stack(%s): gtid=%d, entry=%d, "
86  "stack_top=%p, tied_task=%p\n",
87  location, gtid, entries, stack_top, tied_task));
88  }
89  KMP_DEBUG_ASSERT(stack_top == &task_stack->ts_first_block.sb_block[0]);
90 
91  KA_TRACE(threshold,
92  ("__kmp_trace_task_stack(exit): location = %s, gtid = %d\n",
93  location, gtid));
94 }
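// Illustrative sketch (not part of the build): the task stack is a linked list
// of fixed-size blocks, and the loops above detect a block boundary by masking
// the entry count with TASK_STACK_INDEX_MASK. Because `==` binds tighter than
// `&` in C++, that test must be parenthesized. The self-contained snippet
// below, using hypothetical local names, shows the intended boundary test for
// a block size of 32.
#if 0
#include <cassert>
#include <cstdint>

static bool at_block_boundary(std::int32_t entries) {
  const std::int32_t kBlockSize = 32;             // stand-in for TASK_STACK_BLOCK_SIZE
  const std::int32_t kIndexMask = kBlockSize - 1; // stand-in for TASK_STACK_INDEX_MASK
  // Parentheses are required: `entries & kIndexMask == 0` would evaluate
  // `kIndexMask == 0` first and reduce to `entries & 0`.
  return (entries & kIndexMask) == 0;
}

int main() {
  assert(at_block_boundary(0));   // empty stack: boundary
  assert(at_block_boundary(32));  // exactly one full block: boundary
  assert(!at_block_boundary(33)); // inside the second block
  return 0;
}
#endif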
95 
96 // __kmp_init_task_stack: initialize the task stack for the first time
97 // after a thread_data structure is created.
98 // It should not be necessary to do this again (assuming the stack works).
99 //
100 // gtid: global thread identifier of calling thread
101 // thread_data: thread data for task team thread containing stack
102 static void __kmp_init_task_stack(kmp_int32 gtid,
103  kmp_thread_data_t *thread_data) {
104  kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
105  kmp_stack_block_t *first_block;
106 
107  // set up the first block of the stack
108  first_block = &task_stack->ts_first_block;
109  task_stack->ts_top = (kmp_taskdata_t **)first_block;
110  memset((void *)first_block, '\0',
111  TASK_STACK_BLOCK_SIZE * sizeof(kmp_taskdata_t *));
112 
113  // initialize the stack to be empty
114  task_stack->ts_entries = TASK_STACK_EMPTY;
115  first_block->sb_next = NULL;
116  first_block->sb_prev = NULL;
117 }
118 
119 // __kmp_free_task_stack: free the task stack when thread_data is destroyed.
120 //
121 // gtid: global thread identifier for calling thread
122 // thread_data: thread info for thread containing stack
123 static void __kmp_free_task_stack(kmp_int32 gtid,
124  kmp_thread_data_t *thread_data) {
125  kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
126  kmp_stack_block_t *stack_block = &task_stack->ts_first_block;
127 
128  KMP_DEBUG_ASSERT(task_stack->ts_entries == TASK_STACK_EMPTY);
129  // free from the second block of the stack
130  while (stack_block != NULL) {
131  kmp_stack_block_t *next_block = (stack_block) ? stack_block->sb_next : NULL;
132 
133  stack_block->sb_next = NULL;
134  stack_block->sb_prev = NULL;
135  if (stack_block != &task_stack->ts_first_block) {
136  __kmp_thread_free(__kmp_threads[gtid],
137  stack_block); // free the block, if not the first
138  }
139  stack_block = next_block;
140  }
141  // initialize the stack to be empty
142  task_stack->ts_entries = 0;
143  task_stack->ts_top = NULL;
144 }
145 
146 // __kmp_push_task_stack: Push the tied task onto the task stack.
147 // Grow the stack if necessary by allocating another block.
148 //
149 // gtid: global thread identifier for calling thread
150 // thread: thread info for thread containing stack
151 // tied_task: the task to push on the stack
152 static void __kmp_push_task_stack(kmp_int32 gtid, kmp_info_t *thread,
153  kmp_taskdata_t *tied_task) {
154  // GEH - need to consider what to do if tt_threads_data not allocated yet
155  kmp_thread_data_t *thread_data =
156  &thread->th.th_task_team->tt.tt_threads_data[__kmp_tid_from_gtid(gtid)];
157  kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
158 
159  if (tied_task->td_flags.team_serial || tied_task->td_flags.tasking_ser) {
160  return; // Don't push anything on stack if team or team tasks are serialized
161  }
162 
163  KMP_DEBUG_ASSERT(tied_task->td_flags.tasktype == TASK_TIED);
164  KMP_DEBUG_ASSERT(task_stack->ts_top != NULL);
165 
166  KA_TRACE(20,
167  ("__kmp_push_task_stack(enter): GTID: %d; THREAD: %p; TASK: %p\n",
168  gtid, thread, tied_task));
169  // Store entry
170  *(task_stack->ts_top) = tied_task;
171 
172  // Do bookkeeping for next push
173  task_stack->ts_top++;
174  task_stack->ts_entries++;
175 
176  if ((task_stack->ts_entries & TASK_STACK_INDEX_MASK) == 0) {
177  // Find beginning of this task block
178  kmp_stack_block_t *stack_block =
179  (kmp_stack_block_t *)(task_stack->ts_top - TASK_STACK_BLOCK_SIZE);
180 
181  // Check if we already have a block
182  if (stack_block->sb_next !=
183  NULL) { // reset ts_top to beginning of next block
184  task_stack->ts_top = &stack_block->sb_next->sb_block[0];
185  } else { // Alloc new block and link it up
186  kmp_stack_block_t *new_block = (kmp_stack_block_t *)__kmp_thread_calloc(
187  thread, sizeof(kmp_stack_block_t));
188 
189  task_stack->ts_top = &new_block->sb_block[0];
190  stack_block->sb_next = new_block;
191  new_block->sb_prev = stack_block;
192  new_block->sb_next = NULL;
193 
194  KA_TRACE(
195  30,
196  ("__kmp_push_task_stack(): GTID: %d; TASK: %p; Alloc new block: %p\n",
197  gtid, tied_task, new_block));
198  }
199  }
200  KA_TRACE(20, ("__kmp_push_task_stack(exit): GTID: %d; TASK: %p\n", gtid,
201  tied_task));
202 }
203 
204 // __kmp_pop_task_stack: Pop the tied task from the task stack. Don't return
205 // the task, just check to make sure it matches the ending task passed in.
206 //
207 // gtid: global thread identifier for the calling thread
208 // thread: thread info structure containing stack
209 // ending_task: the task that is ending; it should match the task popped off
210 // the top of the stack
211 static void __kmp_pop_task_stack(kmp_int32 gtid, kmp_info_t *thread,
212  kmp_taskdata_t *ending_task) {
213  // GEH - need to consider what to do if tt_threads_data not allocated yet
214  kmp_thread_data_t *thread_data =
215  &thread->th.th_task_team->tt.tt_threads_data[__kmp_tid_from_gtid(gtid)];
216  kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
217  kmp_taskdata_t *tied_task;
218 
219  if (ending_task->td_flags.team_serial || ending_task->td_flags.tasking_ser) {
220  // Don't pop anything from stack if team or team tasks are serialized
221  return;
222  }
223 
224  KMP_DEBUG_ASSERT(task_stack->ts_top != NULL);
225  KMP_DEBUG_ASSERT(task_stack->ts_entries > 0);
226 
227  KA_TRACE(20, ("__kmp_pop_task_stack(enter): GTID: %d; THREAD: %p\n", gtid,
228  thread));
229 
230  // fix up ts_top if we need to pop from previous block
231  if ((task_stack->ts_entries & TASK_STACK_INDEX_MASK) == 0) {
232  kmp_stack_block_t *stack_block = (kmp_stack_block_t *)(task_stack->ts_top);
233 
234  stack_block = stack_block->sb_prev;
235  task_stack->ts_top = &stack_block->sb_block[TASK_STACK_BLOCK_SIZE];
236  }
237 
238  // finish bookkeeping
239  task_stack->ts_top--;
240  task_stack->ts_entries--;
241 
242  tied_task = *(task_stack->ts_top);
243 
244  KMP_DEBUG_ASSERT(tied_task != NULL);
245  KMP_DEBUG_ASSERT(tied_task->td_flags.tasktype == TASK_TIED);
246  KMP_DEBUG_ASSERT(tied_task == ending_task); // If we built the stack correctly
247 
248  KA_TRACE(20, ("__kmp_pop_task_stack(exit): GTID: %d; TASK: %p\n", gtid,
249  tied_task));
250  return;
251 }
252 #endif /* BUILD_TIED_TASK_STACK */
253 
254 // returns 1 if new task is allowed to execute, 0 otherwise
255 // checks Task Scheduling constraint (if requested) and
256 // mutexinoutset dependencies if any
257 static bool __kmp_task_is_allowed(int gtid, const kmp_int32 is_constrained,
258  const kmp_taskdata_t *tasknew,
259  const kmp_taskdata_t *taskcurr) {
260  if (is_constrained && (tasknew->td_flags.tiedness == TASK_TIED)) {
261  // Check if the candidate obeys the Task Scheduling Constraints (TSC)
262  // only descendant of all deferred tied tasks can be scheduled, checking
263  // the last one is enough, as it in turn is the descendant of all others
264  kmp_taskdata_t *current = taskcurr->td_last_tied;
265  KMP_DEBUG_ASSERT(current != NULL);
266  // check if the task is not suspended on barrier
267  if (current->td_flags.tasktype == TASK_EXPLICIT ||
268  current->td_taskwait_thread > 0) { // <= 0 on barrier
269  kmp_int32 level = current->td_level;
270  kmp_taskdata_t *parent = tasknew->td_parent;
271  while (parent != current && parent->td_level > level) {
272  // check generation up to the level of the current task
273  parent = parent->td_parent;
274  KMP_DEBUG_ASSERT(parent != NULL);
275  }
276  if (parent != current)
277  return false;
278  }
279  }
280  // Check mutexinoutset dependencies, acquire locks
281  kmp_depnode_t *node = tasknew->td_depnode;
282  if (node && (node->dn.mtx_num_locks > 0)) {
283  for (int i = 0; i < node->dn.mtx_num_locks; ++i) {
284  KMP_DEBUG_ASSERT(node->dn.mtx_locks[i] != NULL);
285  if (__kmp_test_lock(node->dn.mtx_locks[i], gtid))
286  continue;
287  // could not get the lock, release previous locks
288  for (int j = i - 1; j >= 0; --j)
289  __kmp_release_lock(node->dn.mtx_locks[j], gtid);
290  return false;
291  }
292  // negative num_locks means all locks acquired successfully
293  node->dn.mtx_num_locks = -node->dn.mtx_num_locks;
294  }
295  return true;
296 }
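// Illustrative sketch (not part of the build): the mutexinoutset handling
// above acquires the task's locks all-or-nothing, rolling back on the first
// failure. The generic snippet below shows the same pattern with std::mutex;
// the function and variable names are hypothetical and only the loop shape
// mirrors the runtime code.
#if 0
#include <mutex>
#include <vector>

// Try to take every lock in `locks`; on the first failure release the ones
// already held and report failure so the caller can retry the task later.
static bool try_acquire_all(std::vector<std::mutex *> &locks) {
  for (size_t i = 0; i < locks.size(); ++i) {
    if (locks[i]->try_lock())
      continue;
    for (size_t j = i; j-- > 0;) // release locks [0, i) in reverse order
      locks[j]->unlock();
    return false;
  }
  return true; // the caller now owns every lock and must release them later
}
#endif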
297 
298 // __kmp_realloc_task_deque:
299 // Re-allocates a task deque for a particular thread, copies the content from
300 // the old deque and adjusts the necessary data structures relating to the
301 // deque. This operation must be done with the deque_lock being held
302 static void __kmp_realloc_task_deque(kmp_info_t *thread,
303  kmp_thread_data_t *thread_data) {
304  kmp_int32 size = TASK_DEQUE_SIZE(thread_data->td);
305  kmp_int32 new_size = 2 * size;
306 
307  KE_TRACE(10, ("__kmp_realloc_task_deque: T#%d reallocating deque[from %d to "
308  "%d] for thread_data %p\n",
309  __kmp_gtid_from_thread(thread), size, new_size, thread_data));
310 
311  kmp_taskdata_t **new_deque =
312  (kmp_taskdata_t **)__kmp_allocate(new_size * sizeof(kmp_taskdata_t *));
313 
314  int i, j;
315  for (i = thread_data->td.td_deque_head, j = 0; j < size;
316  i = (i + 1) & TASK_DEQUE_MASK(thread_data->td), j++)
317  new_deque[j] = thread_data->td.td_deque[i];
318 
319  __kmp_free(thread_data->td.td_deque);
320 
321  thread_data->td.td_deque_head = 0;
322  thread_data->td.td_deque_tail = size;
323  thread_data->td.td_deque = new_deque;
324  thread_data->td.td_deque_size = new_size;
325 }
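// Illustrative sketch (not part of the build): the copy loop above linearizes
// a power-of-two ring buffer, reading from the old head and wrapping with the
// deque mask, so the new deque starts with head = 0 and tail = size. The
// standalone snippet below demonstrates the same index arithmetic on plain
// integers; all names and values are hypothetical.
#if 0
#include <cassert>
#include <vector>

int main() {
  // Old deque of capacity 4 holding 4 items, with head == 2 (wrapped state).
  std::vector<int> old_deque = {30, 40, 10, 20};
  const int size = 4, mask = size - 1, head = 2;

  // Grow to 2 * size and copy starting from the head, wrapping with the mask,
  // just as the loop above does for kmp_taskdata_t pointers.
  std::vector<int> new_deque(2 * size);
  for (int i = head, j = 0; j < size; i = (i + 1) & mask, j++)
    new_deque[j] = old_deque[i];

  assert(new_deque[0] == 10 && new_deque[3] == 40); // head item copied first
  return 0;
}
#endif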
326 
327 // __kmp_push_task: Add a task to the thread's deque
328 static kmp_int32 __kmp_push_task(kmp_int32 gtid, kmp_task_t *task) {
329  kmp_info_t *thread = __kmp_threads[gtid];
330  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
331  kmp_task_team_t *task_team = thread->th.th_task_team;
332  kmp_int32 tid = __kmp_tid_from_gtid(gtid);
333  kmp_thread_data_t *thread_data;
334 
335  KA_TRACE(20,
336  ("__kmp_push_task: T#%d trying to push task %p.\n", gtid, taskdata));
337 
338  if (taskdata->td_flags.tiedness == TASK_UNTIED) {
339  // untied task needs to increment counter so that the task structure is not
340  // freed prematurely
341  kmp_int32 counter = 1 + KMP_ATOMIC_INC(&taskdata->td_untied_count);
342  KMP_DEBUG_USE_VAR(counter);
343  KA_TRACE(
344  20,
345  ("__kmp_push_task: T#%d untied_count (%d) incremented for task %p\n",
346  gtid, counter, taskdata));
347  }
348 
349  // The first check avoids building task_team thread data if serialized
350  if (taskdata->td_flags.task_serial) {
351  KA_TRACE(20, ("__kmp_push_task: T#%d team serialized; returning "
352  "TASK_NOT_PUSHED for task %p\n",
353  gtid, taskdata));
354  return TASK_NOT_PUSHED;
355  }
356 
357  // Now that serialized tasks have returned, we can assume that we are not in
358  // immediate exec mode
359  KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
360  if (!KMP_TASKING_ENABLED(task_team)) {
361  __kmp_enable_tasking(task_team, thread);
362  }
363  KMP_DEBUG_ASSERT(TCR_4(task_team->tt.tt_found_tasks) == TRUE);
364  KMP_DEBUG_ASSERT(TCR_PTR(task_team->tt.tt_threads_data) != NULL);
365 
366  // Find tasking deque specific to encountering thread
367  thread_data = &task_team->tt.tt_threads_data[tid];
368 
369  // No lock needed since only owner can allocate
370  if (thread_data->td.td_deque == NULL) {
371  __kmp_alloc_task_deque(thread, thread_data);
372  }
373 
374  int locked = 0;
375  // Check if deque is full
376  if (TCR_4(thread_data->td.td_deque_ntasks) >=
377  TASK_DEQUE_SIZE(thread_data->td)) {
378  if (__kmp_task_is_allowed(gtid, __kmp_task_stealing_constraint, taskdata,
379  thread->th.th_current_task)) {
380  KA_TRACE(20, ("__kmp_push_task: T#%d deque is full; returning "
381  "TASK_NOT_PUSHED for task %p\n",
382  gtid, taskdata));
383  return TASK_NOT_PUSHED;
384  } else {
385  __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
386  locked = 1;
387  // expand deque to push the task which is not allowed to execute
388  __kmp_realloc_task_deque(thread, thread_data);
389  }
390  }
391  // Lock the deque for the task push operation
392  if (!locked) {
393  __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
394 #if OMP_45_ENABLED
395  // Need to recheck as we can get a proxy task from thread outside of OpenMP
396  if (TCR_4(thread_data->td.td_deque_ntasks) >=
397  TASK_DEQUE_SIZE(thread_data->td)) {
398  if (__kmp_task_is_allowed(gtid, __kmp_task_stealing_constraint, taskdata,
399  thread->th.th_current_task)) {
400  __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
401  KA_TRACE(20, ("__kmp_push_task: T#%d deque is full on 2nd check; "
402  "returning TASK_NOT_PUSHED for task %p\n",
403  gtid, taskdata));
404  return TASK_NOT_PUSHED;
405  } else {
406  // expand deque to push the task which is not allowed to execute
407  __kmp_realloc_task_deque(thread, thread_data);
408  }
409  }
410 #endif
411  }
412  // Must have room since only the calling thread can add tasks to this deque
413  KMP_DEBUG_ASSERT(TCR_4(thread_data->td.td_deque_ntasks) <
414  TASK_DEQUE_SIZE(thread_data->td));
415 
416  thread_data->td.td_deque[thread_data->td.td_deque_tail] =
417  taskdata; // Push taskdata
418  // Wrap index.
419  thread_data->td.td_deque_tail =
420  (thread_data->td.td_deque_tail + 1) & TASK_DEQUE_MASK(thread_data->td);
421  TCW_4(thread_data->td.td_deque_ntasks,
422  TCR_4(thread_data->td.td_deque_ntasks) + 1); // Adjust task count
423 
424  KA_TRACE(20, ("__kmp_push_task: T#%d returning TASK_SUCCESSFULLY_PUSHED: "
425  "task=%p ntasks=%d head=%u tail=%u\n",
426  gtid, taskdata, thread_data->td.td_deque_ntasks,
427  thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
428 
429  __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
430 
431  return TASK_SUCCESSFULLY_PUSHED;
432 }
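// Illustrative sketch (not part of the build): a TASK_NOT_PUSHED result tells
// the caller to run the task itself. A typical caller such as __kmp_omp_task
// (defined later in this file) follows roughly the pattern below; the helper
// name is hypothetical and details of the real caller are omitted.
#if 0
static void push_or_invoke_sketch(kmp_int32 gtid, kmp_task_t *new_task) {
  if (__kmp_push_task(gtid, new_task) == TASK_NOT_PUSHED) {
    // The deque rejected the task (it is serialized, or the deque is full and
    // the task may legally execute now), so the encountering thread executes
    // it immediately instead of deferring it.
    kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
    __kmp_invoke_task(gtid, new_task, current_task);
  }
}
#endif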
433 
434 // __kmp_pop_current_task_from_thread: restore the thread's current task to its
435 // parent when the team ends
436 //
437 // this_thr: thread structure to set current_task in.
438 void __kmp_pop_current_task_from_thread(kmp_info_t *this_thr) {
439  KF_TRACE(10, ("__kmp_pop_current_task_from_thread(enter): T#%d "
440  "this_thread=%p, curtask=%p, "
441  "curtask_parent=%p\n",
442  0, this_thr, this_thr->th.th_current_task,
443  this_thr->th.th_current_task->td_parent));
444 
445  this_thr->th.th_current_task = this_thr->th.th_current_task->td_parent;
446 
447  KF_TRACE(10, ("__kmp_pop_current_task_from_thread(exit): T#%d "
448  "this_thread=%p, curtask=%p, "
449  "curtask_parent=%p\n",
450  0, this_thr, this_thr->th.th_current_task,
451  this_thr->th.th_current_task->td_parent));
452 }
453 
454 // __kmp_push_current_task_to_thread: set up current task in called thread for a
455 // new team
456 //
457 // this_thr: thread structure to set up
458 // team: team for implicit task data
459 // tid: thread within team to set up
460 void __kmp_push_current_task_to_thread(kmp_info_t *this_thr, kmp_team_t *team,
461  int tid) {
462  // The current task of this thread becomes the parent of the newly created
463  // implicit tasks of the new team
464  KF_TRACE(10, ("__kmp_push_current_task_to_thread(enter): T#%d this_thread=%p "
465  "curtask=%p "
466  "parent_task=%p\n",
467  tid, this_thr, this_thr->th.th_current_task,
468  team->t.t_implicit_task_taskdata[tid].td_parent));
469 
470  KMP_DEBUG_ASSERT(this_thr != NULL);
471 
472  if (tid == 0) {
473  if (this_thr->th.th_current_task != &team->t.t_implicit_task_taskdata[0]) {
474  team->t.t_implicit_task_taskdata[0].td_parent =
475  this_thr->th.th_current_task;
476  this_thr->th.th_current_task = &team->t.t_implicit_task_taskdata[0];
477  }
478  } else {
479  team->t.t_implicit_task_taskdata[tid].td_parent =
480  team->t.t_implicit_task_taskdata[0].td_parent;
481  this_thr->th.th_current_task = &team->t.t_implicit_task_taskdata[tid];
482  }
483 
484  KF_TRACE(10, ("__kmp_push_current_task_to_thread(exit): T#%d this_thread=%p "
485  "curtask=%p "
486  "parent_task=%p\n",
487  tid, this_thr, this_thr->th.th_current_task,
488  team->t.t_implicit_task_taskdata[tid].td_parent));
489 }
490 
491 // __kmp_task_start: bookkeeping for a task starting execution
492 //
493 // GTID: global thread id of calling thread
494 // task: task starting execution
495 // current_task: task suspending
496 static void __kmp_task_start(kmp_int32 gtid, kmp_task_t *task,
497  kmp_taskdata_t *current_task) {
498  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
499  kmp_info_t *thread = __kmp_threads[gtid];
500 
501  KA_TRACE(10,
502  ("__kmp_task_start(enter): T#%d starting task %p: current_task=%p\n",
503  gtid, taskdata, current_task));
504 
505  KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
506 
507  // mark currently executing task as suspended
508  // TODO: GEH - make sure root team implicit task is initialized properly.
509  // KMP_DEBUG_ASSERT( current_task -> td_flags.executing == 1 );
510  current_task->td_flags.executing = 0;
511 
512 // Add task to stack if tied
513 #ifdef BUILD_TIED_TASK_STACK
514  if (taskdata->td_flags.tiedness == TASK_TIED) {
515  __kmp_push_task_stack(gtid, thread, taskdata);
516  }
517 #endif /* BUILD_TIED_TASK_STACK */
518 
519  // mark starting task as executing and as current task
520  thread->th.th_current_task = taskdata;
521 
522  KMP_DEBUG_ASSERT(taskdata->td_flags.started == 0 ||
523  taskdata->td_flags.tiedness == TASK_UNTIED);
524  KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 0 ||
525  taskdata->td_flags.tiedness == TASK_UNTIED);
526  taskdata->td_flags.started = 1;
527  taskdata->td_flags.executing = 1;
528  KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 0);
529  KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
530 
531  // GEH TODO: shouldn't we pass some sort of location identifier here?
532  // APT: yes, we will pass location here.
533  // need to store current thread state (in a thread or taskdata structure)
534  // before setting work_state, otherwise wrong state is set after end of task
535 
536  KA_TRACE(10, ("__kmp_task_start(exit): T#%d task=%p\n", gtid, taskdata));
537 
538  return;
539 }
540 
541 #if OMPT_SUPPORT
542 //------------------------------------------------------------------------------
543 // __ompt_task_init:
544 // Initialize OMPT fields maintained by a task. This will only be called after
545 // ompt_start_tool, so we already know whether ompt is enabled or not.
546 
547 static inline void __ompt_task_init(kmp_taskdata_t *task, int tid) {
548  // Callers of __ompt_task_init have already checked the ompt_enabled condition.
549  task->ompt_task_info.task_data.value = 0;
550  task->ompt_task_info.frame.exit_frame = NULL;
551  task->ompt_task_info.frame.enter_frame = NULL;
552 #if OMP_40_ENABLED
553  task->ompt_task_info.ndeps = 0;
554  task->ompt_task_info.deps = NULL;
555 #endif /* OMP_40_ENABLED */
556 }
557 
558 // __ompt_task_start:
559 // Build and trigger task-begin event
560 static inline void __ompt_task_start(kmp_task_t *task,
561  kmp_taskdata_t *current_task,
562  kmp_int32 gtid) {
563  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
564  ompt_task_status_t status = ompt_task_switch;
565  if (__kmp_threads[gtid]->th.ompt_thread_info.ompt_task_yielded) {
566  status = ompt_task_yield;
567  __kmp_threads[gtid]->th.ompt_thread_info.ompt_task_yielded = 0;
568  }
569  /* let OMPT know that we're about to run this task */
570  if (ompt_enabled.ompt_callback_task_schedule) {
571  ompt_callbacks.ompt_callback(ompt_callback_task_schedule)(
572  &(current_task->ompt_task_info.task_data), status,
573  &(taskdata->ompt_task_info.task_data));
574  }
575  taskdata->ompt_task_info.scheduling_parent = current_task;
576 }
577 
578 // __ompt_task_finish:
579 // Build and trigger final task-schedule event
580 static inline void
581 __ompt_task_finish(kmp_task_t *task, kmp_taskdata_t *resumed_task,
582  ompt_task_status_t status = ompt_task_complete) {
583  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
584  if (__kmp_omp_cancellation && taskdata->td_taskgroup &&
585  taskdata->td_taskgroup->cancel_request == cancel_taskgroup) {
586  status = ompt_task_cancel;
587  }
588 
589  /* let OMPT know that we're returning to the callee task */
590  if (ompt_enabled.ompt_callback_task_schedule) {
591  ompt_callbacks.ompt_callback(ompt_callback_task_schedule)(
592  &(taskdata->ompt_task_info.task_data), status,
593  &((resumed_task ? resumed_task
594  : (taskdata->ompt_task_info.scheduling_parent
595  ? taskdata->ompt_task_info.scheduling_parent
596  : taskdata->td_parent))
597  ->ompt_task_info.task_data));
598  }
599 }
600 #endif
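// Illustrative sketch (not part of the build): __ompt_task_start and
// __ompt_task_finish above dispatch ompt_callback_task_schedule to whatever
// tool registered for it. A minimal tool-side callback could look like the
// following; the header name and the exact type/enum spellings depend on the
// OMPT interface revision this runtime was built against, so treat them as
// assumptions.
#if 0
#include <stdio.h>
// #include <omp-tools.h> // OMPT tool header in recent releases; may differ here

// Report every task switch in which the prior task ran to completion.
static void my_task_schedule(ompt_data_t *prior_task_data,
                             ompt_task_status_t prior_task_status,
                             ompt_data_t *next_task_data) {
  (void)prior_task_data;
  (void)next_task_data;
  if (prior_task_status == ompt_task_complete)
    printf("a task completed\n");
}

// Registered from the tool's ompt_start_tool initializer, e.g.:
//   set_callback(ompt_callback_task_schedule, (ompt_callback_t)my_task_schedule);
// where set_callback was obtained through the lookup function.
#endif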
601 
602 template <bool ompt>
603 static void __kmpc_omp_task_begin_if0_template(ident_t *loc_ref, kmp_int32 gtid,
604  kmp_task_t *task,
605  void *frame_address,
606  void *return_address) {
607  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
608  kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
609 
610  KA_TRACE(10, ("__kmpc_omp_task_begin_if0(enter): T#%d loc=%p task=%p "
611  "current_task=%p\n",
612  gtid, loc_ref, taskdata, current_task));
613 
614  if (taskdata->td_flags.tiedness == TASK_UNTIED) {
615  // untied task needs to increment counter so that the task structure is not
616  // freed prematurely
617  kmp_int32 counter = 1 + KMP_ATOMIC_INC(&taskdata->td_untied_count);
618  KMP_DEBUG_USE_VAR(counter);
619  KA_TRACE(20, ("__kmpc_omp_task_begin_if0: T#%d untied_count (%d) "
620  "incremented for task %p\n",
621  gtid, counter, taskdata));
622  }
623 
624  taskdata->td_flags.task_serial =
625  1; // Execute this task immediately, not deferred.
626  __kmp_task_start(gtid, task, current_task);
627 
628 #if OMPT_SUPPORT
629  if (ompt) {
630  if (current_task->ompt_task_info.frame.enter_frame == NULL) {
631  current_task->ompt_task_info.frame.enter_frame =
632  taskdata->ompt_task_info.frame.exit_frame = frame_address;
633  }
634  if (ompt_enabled.ompt_callback_task_create) {
635  ompt_task_info_t *parent_info = &(current_task->ompt_task_info);
636  ompt_callbacks.ompt_callback(ompt_callback_task_create)(
637  &(parent_info->task_data), &(parent_info->frame),
638  &(taskdata->ompt_task_info.task_data),
639  ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(taskdata), 0,
640  return_address);
641  }
642  __ompt_task_start(task, current_task, gtid);
643  }
644 #endif // OMPT_SUPPORT
645 
646  KA_TRACE(10, ("__kmpc_omp_task_begin_if0(exit): T#%d loc=%p task=%p,\n", gtid,
647  loc_ref, taskdata));
648 }
649 
650 #if OMPT_SUPPORT
651 OMPT_NOINLINE
652 static void __kmpc_omp_task_begin_if0_ompt(ident_t *loc_ref, kmp_int32 gtid,
653  kmp_task_t *task,
654  void *frame_address,
655  void *return_address) {
656  __kmpc_omp_task_begin_if0_template<true>(loc_ref, gtid, task, frame_address,
657  return_address);
658 }
659 #endif // OMPT_SUPPORT
660 
661 // __kmpc_omp_task_begin_if0: report that a given serialized task has started
662 // execution
663 //
664 // loc_ref: source location information; points to beginning of task block.
665 // gtid: global thread number.
666 // task: task thunk for the started task.
667 void __kmpc_omp_task_begin_if0(ident_t *loc_ref, kmp_int32 gtid,
668  kmp_task_t *task) {
669 #if OMPT_SUPPORT
670  if (UNLIKELY(ompt_enabled.enabled)) {
671  OMPT_STORE_RETURN_ADDRESS(gtid);
672  __kmpc_omp_task_begin_if0_ompt(loc_ref, gtid, task,
673  OMPT_GET_FRAME_ADDRESS(1),
674  OMPT_LOAD_RETURN_ADDRESS(gtid));
675  return;
676  }
677 #endif
678  __kmpc_omp_task_begin_if0_template<false>(loc_ref, gtid, task, NULL, NULL);
679 }
680 
681 #ifdef TASK_UNUSED
682 // __kmpc_omp_task_begin: report that a given task has started execution
683 // NEVER GENERATED BY COMPILER, DEPRECATED!!!
684 void __kmpc_omp_task_begin(ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *task) {
685  kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
686 
687  KA_TRACE(
688  10,
689  ("__kmpc_omp_task_begin(enter): T#%d loc=%p task=%p current_task=%p\n",
690  gtid, loc_ref, KMP_TASK_TO_TASKDATA(task), current_task));
691 
692  __kmp_task_start(gtid, task, current_task);
693 
694  KA_TRACE(10, ("__kmpc_omp_task_begin(exit): T#%d loc=%p task=%p,\n", gtid,
695  loc_ref, KMP_TASK_TO_TASKDATA(task)));
696  return;
697 }
698 #endif // TASK_UNUSED
699 
700 // __kmp_free_task: free the current task space and the space for shareds
701 //
702 // gtid: Global thread ID of calling thread
703 // taskdata: task to free
704 // thread: thread data structure of caller
705 static void __kmp_free_task(kmp_int32 gtid, kmp_taskdata_t *taskdata,
706  kmp_info_t *thread) {
707  KA_TRACE(30, ("__kmp_free_task: T#%d freeing data from task %p\n", gtid,
708  taskdata));
709 
710  // Check to make sure all flags and counters have the correct values
711  KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
712  KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 0);
713  KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 1);
714  KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
715  KMP_DEBUG_ASSERT(taskdata->td_allocated_child_tasks == 0 ||
716  taskdata->td_flags.task_serial == 1);
717  KMP_DEBUG_ASSERT(taskdata->td_incomplete_child_tasks == 0);
718 
719  taskdata->td_flags.freed = 1;
720  ANNOTATE_HAPPENS_BEFORE(taskdata);
721 // deallocate the taskdata and shared variable blocks associated with this task
722 #if USE_FAST_MEMORY
723  __kmp_fast_free(thread, taskdata);
724 #else /* ! USE_FAST_MEMORY */
725  __kmp_thread_free(thread, taskdata);
726 #endif
727 
728  KA_TRACE(20, ("__kmp_free_task: T#%d freed task %p\n", gtid, taskdata));
729 }
730 
731 // __kmp_free_task_and_ancestors: free the current task and ancestors without
732 // children
733 //
734 // gtid: Global thread ID of calling thread
735 // taskdata: task to free
736 // thread: thread data structure of caller
737 static void __kmp_free_task_and_ancestors(kmp_int32 gtid,
738  kmp_taskdata_t *taskdata,
739  kmp_info_t *thread) {
740 #if OMP_45_ENABLED
741  // Proxy tasks must always be allowed to free their parents
742  // because they can be run in background even in serial mode.
743  kmp_int32 team_serial =
744  (taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser) &&
745  !taskdata->td_flags.proxy;
746 #else
747  kmp_int32 team_serial =
748  taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser;
749 #endif
750  KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
751 
752  kmp_int32 children = KMP_ATOMIC_DEC(&taskdata->td_allocated_child_tasks) - 1;
753  KMP_DEBUG_ASSERT(children >= 0);
754 
755  // Now, go up the ancestor tree to see if any ancestors can now be freed.
756  while (children == 0) {
757  kmp_taskdata_t *parent_taskdata = taskdata->td_parent;
758 
759  KA_TRACE(20, ("__kmp_free_task_and_ancestors(enter): T#%d task %p complete "
760  "and freeing itself\n",
761  gtid, taskdata));
762 
763  // --- Deallocate my ancestor task ---
764  __kmp_free_task(gtid, taskdata, thread);
765 
766  taskdata = parent_taskdata;
767 
768  if (team_serial)
769  return;
770  // Stop checking ancestors at implicit task instead of walking up ancestor
771  // tree to avoid premature deallocation of ancestors.
772  if (taskdata->td_flags.tasktype == TASK_IMPLICIT) {
773  if (taskdata->td_dephash) { // do we need to cleanup dephash?
774  int children = KMP_ATOMIC_LD_ACQ(&taskdata->td_incomplete_child_tasks);
775  kmp_tasking_flags_t flags_old = taskdata->td_flags;
776  if (children == 0 && flags_old.complete == 1) {
777  kmp_tasking_flags_t flags_new = flags_old;
778  flags_new.complete = 0;
779  if (KMP_COMPARE_AND_STORE_ACQ32(
780  RCAST(kmp_int32 *, &taskdata->td_flags),
781  *RCAST(kmp_int32 *, &flags_old),
782  *RCAST(kmp_int32 *, &flags_new))) {
783  KA_TRACE(100, ("__kmp_free_task_and_ancestors: T#%d cleans "
784  "dephash of implicit task %p\n",
785  gtid, taskdata));
786  // cleanup dephash of finished implicit task
787  __kmp_dephash_free_entries(thread, taskdata->td_dephash);
788  }
789  }
790  }
791  return;
792  }
793  // Predecrement simulated by "- 1" calculation
794  children = KMP_ATOMIC_DEC(&taskdata->td_allocated_child_tasks) - 1;
795  KMP_DEBUG_ASSERT(children >= 0);
796  }
797 
798  KA_TRACE(
799  20, ("__kmp_free_task_and_ancestors(exit): T#%d task %p has %d children; "
800  "not freeing it yet\n",
801  gtid, taskdata, children));
802 }
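// Illustrative sketch (not part of the build): td_allocated_child_tasks acts
// as a reference count that starts at 1 for the task itself (see
// __kmp_task_alloc below) and gains one per allocated child; the loop above
// walks up the parent chain, freeing every node whose count drops to zero.
// A generic version of that walk, with hypothetical names:
#if 0
#include <atomic>

struct node_t {
  node_t *parent;
  std::atomic<int> allocated_children; // 1 for self + one per live child
};

// Drop one reference on `node`, then keep freeing ancestors while their
// counts reach zero, mirroring __kmp_free_task_and_ancestors above.
static void release_and_free_ancestors(node_t *node) {
  int remaining = node->allocated_children.fetch_sub(1) - 1;
  while (remaining == 0) {
    node_t *parent = node->parent;
    delete node; // corresponds to __kmp_free_task in the runtime
    if (parent == nullptr)
      return; // the implicit/root task is never freed through this path
    node = parent;
    remaining = node->allocated_children.fetch_sub(1) - 1;
  }
}
#endif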
803 
804 // __kmp_task_finish: bookkeeping to do when a task finishes execution
805 //
806 // gtid: global thread ID for calling thread
807 // task: task to be finished
808 // resumed_task: task to be resumed. (may be NULL if task is serialized)
809 template <bool ompt>
810 static void __kmp_task_finish(kmp_int32 gtid, kmp_task_t *task,
811  kmp_taskdata_t *resumed_task) {
812  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
813  kmp_info_t *thread = __kmp_threads[gtid];
814  kmp_task_team_t *task_team =
815  thread->th.th_task_team; // might be NULL for serial teams...
816  kmp_int32 children = 0;
817 
818  KA_TRACE(10, ("__kmp_task_finish(enter): T#%d finishing task %p and resuming "
819  "task %p\n",
820  gtid, taskdata, resumed_task));
821 
822  KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
823 
824 // Pop task from stack if tied
825 #ifdef BUILD_TIED_TASK_STACK
826  if (taskdata->td_flags.tiedness == TASK_TIED) {
827  __kmp_pop_task_stack(gtid, thread, taskdata);
828  }
829 #endif /* BUILD_TIED_TASK_STACK */
830 
831  if (taskdata->td_flags.tiedness == TASK_UNTIED) {
832  // untied task needs to check the counter so that the task structure is not
833  // freed prematurely
834  kmp_int32 counter = KMP_ATOMIC_DEC(&taskdata->td_untied_count) - 1;
835  KA_TRACE(
836  20,
837  ("__kmp_task_finish: T#%d untied_count (%d) decremented for task %p\n",
838  gtid, counter, taskdata));
839  if (counter > 0) {
840  // untied task is not done, to be continued possibly by other thread, do
841  // not free it now
842  if (resumed_task == NULL) {
843  KMP_DEBUG_ASSERT(taskdata->td_flags.task_serial);
844  resumed_task = taskdata->td_parent; // In a serialized task, the resumed
845  // task is the parent
846  }
847  thread->th.th_current_task = resumed_task; // restore current_task
848  resumed_task->td_flags.executing = 1; // resume previous task
849  KA_TRACE(10, ("__kmp_task_finish(exit): T#%d partially done task %p, "
850  "resuming task %p\n",
851  gtid, taskdata, resumed_task));
852  return;
853  }
854  }
855 #if OMPT_SUPPORT
856  if (ompt)
857  __ompt_task_finish(task, resumed_task);
858 #endif
859 
860  // Check mutexinoutset dependencies, release locks
861  kmp_depnode_t *node = taskdata->td_depnode;
862  if (node && (node->dn.mtx_num_locks < 0)) {
863  // negative num_locks means all locks were acquired
864  node->dn.mtx_num_locks = -node->dn.mtx_num_locks;
865  for (int i = node->dn.mtx_num_locks - 1; i >= 0; --i) {
866  KMP_DEBUG_ASSERT(node->dn.mtx_locks[i] != NULL);
867  __kmp_release_lock(node->dn.mtx_locks[i], gtid);
868  }
869  }
870 
871  KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 0);
872  taskdata->td_flags.complete = 1; // mark the task as completed
873  KMP_DEBUG_ASSERT(taskdata->td_flags.started == 1);
874  KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
875 
876  // Only need to keep track of count if team parallel and tasking not
877  // serialized
878  if (!(taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser)) {
879  // Predecrement simulated by "- 1" calculation
880  children =
881  KMP_ATOMIC_DEC(&taskdata->td_parent->td_incomplete_child_tasks) - 1;
882  KMP_DEBUG_ASSERT(children >= 0);
883 #if OMP_40_ENABLED
884  if (taskdata->td_taskgroup)
885  KMP_ATOMIC_DEC(&taskdata->td_taskgroup->count);
886  __kmp_release_deps(gtid, taskdata);
887 #if OMP_45_ENABLED
888  } else if (task_team && task_team->tt.tt_found_proxy_tasks) {
889  // if we found proxy tasks there could exist a dependency chain
890  // with the proxy task as origin
891  __kmp_release_deps(gtid, taskdata);
892 #endif // OMP_45_ENABLED
893 #endif // OMP_40_ENABLED
894  }
895 
896  // td_flags.executing must be marked as 0 after __kmp_release_deps has been
897  // called. Otherwise, if a task is executed immediately from the release_deps
898  // code, the flag will be reset to 1 again by this same function
899  KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 1);
900  taskdata->td_flags.executing = 0; // suspend the finishing task
901 
902  KA_TRACE(
903  20, ("__kmp_task_finish: T#%d finished task %p, %d incomplete children\n",
904  gtid, taskdata, children));
905 
906 #if OMP_40_ENABLED
907  /* If the task's destructor thunk flag has been set, we need to invoke the
908  destructor thunk that has been generated by the compiler. The code is
909  placed here, since at this point other tasks might have been released
910  hence overlapping the destructor invocations with some other work in the
911  released tasks. The OpenMP spec is not specific on when the destructors
912  are invoked, so we should be free to choose. */
913  if (taskdata->td_flags.destructors_thunk) {
914  kmp_routine_entry_t destr_thunk = task->data1.destructors;
915  KMP_ASSERT(destr_thunk);
916  destr_thunk(gtid, task);
917  }
918 #endif // OMP_40_ENABLED
919 
920  // bookkeeping for resuming task:
921  // GEH - note tasking_ser => task_serial
922  KMP_DEBUG_ASSERT(
923  (taskdata->td_flags.tasking_ser || taskdata->td_flags.task_serial) ==
924  taskdata->td_flags.task_serial);
925  if (taskdata->td_flags.task_serial) {
926  if (resumed_task == NULL) {
927  resumed_task = taskdata->td_parent; // In a serialized task, the resumed
928  // task is the parent
929  }
930  } else {
931  KMP_DEBUG_ASSERT(resumed_task !=
932  NULL); // verify that resumed task is passed as argument
933  }
934 
935  // Free this task and then ancestor tasks if they have no children.
936  // Restore th_current_task first as suggested by John:
937  // johnmc: if an asynchronous inquiry peers into the runtime system
938  // it doesn't see the freed task as the current task.
939  thread->th.th_current_task = resumed_task;
940  __kmp_free_task_and_ancestors(gtid, taskdata, thread);
941 
942  // TODO: GEH - make sure root team implicit task is initialized properly.
943  // KMP_DEBUG_ASSERT( resumed_task->td_flags.executing == 0 );
944  resumed_task->td_flags.executing = 1; // resume previous task
945 
946  KA_TRACE(
947  10, ("__kmp_task_finish(exit): T#%d finished task %p, resuming task %p\n",
948  gtid, taskdata, resumed_task));
949 
950  return;
951 }
952 
953 template <bool ompt>
954 static void __kmpc_omp_task_complete_if0_template(ident_t *loc_ref,
955  kmp_int32 gtid,
956  kmp_task_t *task) {
957  KA_TRACE(10, ("__kmpc_omp_task_complete_if0(enter): T#%d loc=%p task=%p\n",
958  gtid, loc_ref, KMP_TASK_TO_TASKDATA(task)));
959  // this routine will provide task to resume
960  __kmp_task_finish<ompt>(gtid, task, NULL);
961 
962  KA_TRACE(10, ("__kmpc_omp_task_complete_if0(exit): T#%d loc=%p task=%p\n",
963  gtid, loc_ref, KMP_TASK_TO_TASKDATA(task)));
964 
965 #if OMPT_SUPPORT
966  if (ompt) {
967  omp_frame_t *ompt_frame;
968  __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
969  ompt_frame->enter_frame = NULL;
970  }
971 #endif
972 
973  return;
974 }
975 
976 #if OMPT_SUPPORT
977 OMPT_NOINLINE
978 void __kmpc_omp_task_complete_if0_ompt(ident_t *loc_ref, kmp_int32 gtid,
979  kmp_task_t *task) {
980  __kmpc_omp_task_complete_if0_template<true>(loc_ref, gtid, task);
981 }
982 #endif // OMPT_SUPPORT
983 
984 // __kmpc_omp_task_complete_if0: report that a task has completed execution
985 //
986 // loc_ref: source location information; points to end of task block.
987 // gtid: global thread number.
988 // task: task thunk for the completed task.
989 void __kmpc_omp_task_complete_if0(ident_t *loc_ref, kmp_int32 gtid,
990  kmp_task_t *task) {
991 #if OMPT_SUPPORT
992  if (UNLIKELY(ompt_enabled.enabled)) {
993  __kmpc_omp_task_complete_if0_ompt(loc_ref, gtid, task);
994  return;
995  }
996 #endif
997  __kmpc_omp_task_complete_if0_template<false>(loc_ref, gtid, task);
998 }
999 
1000 #ifdef TASK_UNUSED
1001 // __kmpc_omp_task_complete: report that a task has completed execution
1002 // NEVER GENERATED BY COMPILER, DEPRECATED!!!
1003 void __kmpc_omp_task_complete(ident_t *loc_ref, kmp_int32 gtid,
1004  kmp_task_t *task) {
1005  KA_TRACE(10, ("__kmpc_omp_task_complete(enter): T#%d loc=%p task=%p\n", gtid,
1006  loc_ref, KMP_TASK_TO_TASKDATA(task)));
1007 
1008  __kmp_task_finish<false>(gtid, task,
1009  NULL); // Not sure how to find task to resume
1010 
1011  KA_TRACE(10, ("__kmpc_omp_task_complete(exit): T#%d loc=%p task=%p\n", gtid,
1012  loc_ref, KMP_TASK_TO_TASKDATA(task)));
1013  return;
1014 }
1015 #endif // TASK_UNUSED
1016 
1017 // __kmp_init_implicit_task: Initialize the appropriate fields in the implicit
1018 // task for a given thread
1019 //
1020 // loc_ref: reference to source location of parallel region
1021 // this_thr: thread data structure corresponding to implicit task
1022 // team: team for this_thr
1023 // tid: thread id of given thread within team
1024 // set_curr_task: TRUE if need to push current task to thread
1025 // NOTE: Routine does not set up the implicit task ICVs. This is assumed to
1026 // have already been done elsewhere.
1027 // TODO: Get better loc_ref. Value passed in may be NULL
1028 void __kmp_init_implicit_task(ident_t *loc_ref, kmp_info_t *this_thr,
1029  kmp_team_t *team, int tid, int set_curr_task) {
1030  kmp_taskdata_t *task = &team->t.t_implicit_task_taskdata[tid];
1031 
1032  KF_TRACE(
1033  10,
1034  ("__kmp_init_implicit_task(enter): T#:%d team=%p task=%p, reinit=%s\n",
1035  tid, team, task, set_curr_task ? "TRUE" : "FALSE"));
1036 
1037  task->td_task_id = KMP_GEN_TASK_ID();
1038  task->td_team = team;
1039  // task->td_parent = NULL; // fix for CQ230101 (broken parent task info
1040  // in debugger)
1041  task->td_ident = loc_ref;
1042  task->td_taskwait_ident = NULL;
1043  task->td_taskwait_counter = 0;
1044  task->td_taskwait_thread = 0;
1045 
1046  task->td_flags.tiedness = TASK_TIED;
1047  task->td_flags.tasktype = TASK_IMPLICIT;
1048 #if OMP_45_ENABLED
1049  task->td_flags.proxy = TASK_FULL;
1050 #endif
1051 
1052  // All implicit tasks are executed immediately, not deferred
1053  task->td_flags.task_serial = 1;
1054  task->td_flags.tasking_ser = (__kmp_tasking_mode == tskm_immediate_exec);
1055  task->td_flags.team_serial = (team->t.t_serialized) ? 1 : 0;
1056 
1057  task->td_flags.started = 1;
1058  task->td_flags.executing = 1;
1059  task->td_flags.complete = 0;
1060  task->td_flags.freed = 0;
1061 
1062 #if OMP_40_ENABLED
1063  task->td_depnode = NULL;
1064 #endif
1065  task->td_last_tied = task;
1066 
1067  if (set_curr_task) { // only do this init first time thread is created
1068  KMP_ATOMIC_ST_REL(&task->td_incomplete_child_tasks, 0);
1069  // Not used: don't need to deallocate implicit task
1070  KMP_ATOMIC_ST_REL(&task->td_allocated_child_tasks, 0);
1071 #if OMP_40_ENABLED
1072  task->td_taskgroup = NULL; // An implicit task does not have taskgroup
1073  task->td_dephash = NULL;
1074 #endif
1075  __kmp_push_current_task_to_thread(this_thr, team, tid);
1076  } else {
1077  KMP_DEBUG_ASSERT(task->td_incomplete_child_tasks == 0);
1078  KMP_DEBUG_ASSERT(task->td_allocated_child_tasks == 0);
1079  }
1080 
1081 #if OMPT_SUPPORT
1082  if (UNLIKELY(ompt_enabled.enabled))
1083  __ompt_task_init(task, tid);
1084 #endif
1085 
1086  KF_TRACE(10, ("__kmp_init_implicit_task(exit): T#:%d team=%p task=%p\n", tid,
1087  team, task));
1088 }
1089 
1090 // __kmp_finish_implicit_task: Release resources associated with implicit tasks
1091 // at the end of parallel regions. Some resources are kept for reuse in the next
1092 // parallel region.
1093 //
1094 // thread: thread data structure corresponding to implicit task
1095 void __kmp_finish_implicit_task(kmp_info_t *thread) {
1096  kmp_taskdata_t *task = thread->th.th_current_task;
1097  if (task->td_dephash) {
1098  int children;
1099  task->td_flags.complete = 1;
1100  children = KMP_ATOMIC_LD_ACQ(&task->td_incomplete_child_tasks);
1101  kmp_tasking_flags_t flags_old = task->td_flags;
1102  if (children == 0 && flags_old.complete == 1) {
1103  kmp_tasking_flags_t flags_new = flags_old;
1104  flags_new.complete = 0;
1105  if (KMP_COMPARE_AND_STORE_ACQ32(RCAST(kmp_int32 *, &task->td_flags),
1106  *RCAST(kmp_int32 *, &flags_old),
1107  *RCAST(kmp_int32 *, &flags_new))) {
1108  KA_TRACE(100, ("__kmp_finish_implicit_task: T#%d cleans "
1109  "dephash of implicit task %p\n",
1110  thread->th.th_info.ds.ds_gtid, task));
1111  __kmp_dephash_free_entries(thread, task->td_dephash);
1112  }
1113  }
1114  }
1115 }
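// Illustrative sketch (not part of the build): the cleanup above "claims" the
// dephash by compare-and-swapping the entire 32-bit flags word (the bitfield
// struct reinterpreted as kmp_int32) from complete==1 to complete==0, so that
// exactly one thread performs the one-time cleanup. A generic version of that
// claim, using a plain atomic word and a hypothetical bit layout:
#if 0
#include <atomic>
#include <cstdint>

static std::atomic<std::uint32_t> flags_word{0};
static const std::uint32_t kCompleteBit = 1u << 0; // hypothetical bit position

// Returns true for exactly one caller: the one whose CAS flips `complete` off
// and therefore owns the cleanup, as with the CAS on td_flags above.
static bool claim_cleanup() {
  std::uint32_t old_flags = flags_word.load(std::memory_order_acquire);
  if ((old_flags & kCompleteBit) == 0)
    return false; // not complete yet, nothing to claim
  std::uint32_t new_flags = old_flags & ~kCompleteBit;
  return flags_word.compare_exchange_strong(old_flags, new_flags,
                                            std::memory_order_acq_rel);
}
#endif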
1116 
1117 // __kmp_free_implicit_task: Release resources associated with implicit tasks
1118 // when these tasks are destroyed
1119 //
1120 // thread: thread data structure corresponding to implicit task
1121 void __kmp_free_implicit_task(kmp_info_t *thread) {
1122  kmp_taskdata_t *task = thread->th.th_current_task;
1123  if (task && task->td_dephash) {
1124  __kmp_dephash_free(thread, task->td_dephash);
1125  task->td_dephash = NULL;
1126  }
1127 }
1128 
1129 // Round up a size to a multiple of val (a power of two): used to insert padding
1130 // between structures co-allocated using a single malloc() call
1131 static size_t __kmp_round_up_to_val(size_t size, size_t val) {
1132  if (size & (val - 1)) {
1133  size &= ~(val - 1);
1134  if (size <= KMP_SIZE_T_MAX - val) {
1135  size += val; // Round up if there is no overflow.
1136  }
1137  }
1138  return size;
1139 } // __kmp_round_up_to_val
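// Illustrative sketch (not part of the build): __kmp_round_up_to_val rounds
// `size` up to the next multiple of `val`, where `val` must be a power of two,
// e.g. the sizeof(void *) alignment applied to the shareds block below.
#if 0
#include <cassert>

int main() {
  // With val == 8: aligned sizes are unchanged, everything else rounds up.
  assert(__kmp_round_up_to_val(24, 8) == 24);
  assert(__kmp_round_up_to_val(25, 8) == 32);
  assert(__kmp_round_up_to_val(31, 8) == 32);
  return 0;
}
#endif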
1140 
1141 // __kmp_task_alloc: Allocate the taskdata and task data structures for a task
1142 //
1143 // loc_ref: source location information
1144 // gtid: global thread number.
1145 // flags: include tiedness & task type (explicit vs. implicit) of the ''new''
1146 // task encountered. Converted from kmp_int32 to kmp_tasking_flags_t in routine.
1147 // sizeof_kmp_task_t: Size in bytes of kmp_task_t data structure including
1148 // private vars accessed in task.
1149 // sizeof_shareds: Size in bytes of array of pointers to shared vars accessed
1150 // in task.
1151 // task_entry: Pointer to task code entry point generated by compiler.
1152 // returns: a pointer to the allocated kmp_task_t structure (task).
1153 kmp_task_t *__kmp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
1154  kmp_tasking_flags_t *flags,
1155  size_t sizeof_kmp_task_t, size_t sizeof_shareds,
1156  kmp_routine_entry_t task_entry) {
1157  kmp_task_t *task;
1158  kmp_taskdata_t *taskdata;
1159  kmp_info_t *thread = __kmp_threads[gtid];
1160  kmp_team_t *team = thread->th.th_team;
1161  kmp_taskdata_t *parent_task = thread->th.th_current_task;
1162  size_t shareds_offset;
1163 
1164  if (!TCR_4(__kmp_init_middle))
1165  __kmp_middle_initialize();
1166 
1167  KA_TRACE(10, ("__kmp_task_alloc(enter): T#%d loc=%p, flags=(0x%x) "
1168  "sizeof_task=%ld sizeof_shared=%ld entry=%p\n",
1169  gtid, loc_ref, *((kmp_int32 *)flags), sizeof_kmp_task_t,
1170  sizeof_shareds, task_entry));
1171 
1172  if (parent_task->td_flags.final) {
1173  if (flags->merged_if0) {
1174  }
1175  flags->final = 1;
1176  }
1177  if (flags->tiedness == TASK_UNTIED && !team->t.t_serialized) {
1178  // An untied task encountered here causes the TSC algorithm to check the entire
1179  // deque of the victim thread. If no untied task is encountered, checking the
1180  // head of the deque is enough.
1181  KMP_CHECK_UPDATE(thread->th.th_task_team->tt.tt_untied_task_encountered, 1);
1182  }
1183 
1184 #if OMP_45_ENABLED
1185  if (flags->proxy == TASK_PROXY) {
1186  flags->tiedness = TASK_UNTIED;
1187  flags->merged_if0 = 1;
1188 
1189  /* are we running in a sequential parallel or tskm_immediate_exec... we need
1190  tasking support enabled */
1191  if ((thread->th.th_task_team) == NULL) {
1192  /* This should only happen if the team is serialized
1193  setup a task team and propagate it to the thread */
1194  KMP_DEBUG_ASSERT(team->t.t_serialized);
1195  KA_TRACE(30,
1196  ("T#%d creating task team in __kmp_task_alloc for proxy task\n",
1197  gtid));
1198  __kmp_task_team_setup(
1199  thread, team,
1200  1); // 1 indicates setup the current team regardless of nthreads
1201  thread->th.th_task_team = team->t.t_task_team[thread->th.th_task_state];
1202  }
1203  kmp_task_team_t *task_team = thread->th.th_task_team;
1204 
1205  /* tasking must be enabled now as the task might not be pushed */
1206  if (!KMP_TASKING_ENABLED(task_team)) {
1207  KA_TRACE(
1208  30,
1209  ("T#%d enabling tasking in __kmp_task_alloc for proxy task\n", gtid));
1210  __kmp_enable_tasking(task_team, thread);
1211  kmp_int32 tid = thread->th.th_info.ds.ds_tid;
1212  kmp_thread_data_t *thread_data = &task_team->tt.tt_threads_data[tid];
1213  // No lock needed since only owner can allocate
1214  if (thread_data->td.td_deque == NULL) {
1215  __kmp_alloc_task_deque(thread, thread_data);
1216  }
1217  }
1218 
1219  if (task_team->tt.tt_found_proxy_tasks == FALSE)
1220  TCW_4(task_team->tt.tt_found_proxy_tasks, TRUE);
1221  }
1222 #endif
1223 
1224  // Calculate shared structure offset including padding after kmp_task_t struct
1225  // to align pointers in shared struct
1226  shareds_offset = sizeof(kmp_taskdata_t) + sizeof_kmp_task_t;
1227  shareds_offset = __kmp_round_up_to_val(shareds_offset, sizeof(void *));
1228 
1229  // Allocate a kmp_taskdata_t block and a kmp_task_t block.
1230  KA_TRACE(30, ("__kmp_task_alloc: T#%d First malloc size: %ld\n", gtid,
1231  shareds_offset));
1232  KA_TRACE(30, ("__kmp_task_alloc: T#%d Second malloc size: %ld\n", gtid,
1233  sizeof_shareds));
1234 
1235 // Avoid double allocation here by combining shareds with taskdata
1236 #if USE_FAST_MEMORY
1237  taskdata = (kmp_taskdata_t *)__kmp_fast_allocate(thread, shareds_offset +
1238  sizeof_shareds);
1239 #else /* ! USE_FAST_MEMORY */
1240  taskdata = (kmp_taskdata_t *)__kmp_thread_malloc(thread, shareds_offset +
1241  sizeof_shareds);
1242 #endif /* USE_FAST_MEMORY */
1243  ANNOTATE_HAPPENS_AFTER(taskdata);
1244 
1245  task = KMP_TASKDATA_TO_TASK(taskdata);
1246 
1247 // Make sure task & taskdata are aligned appropriately
1248 #if KMP_ARCH_X86 || KMP_ARCH_PPC64 || !KMP_HAVE_QUAD
1249  KMP_DEBUG_ASSERT((((kmp_uintptr_t)taskdata) & (sizeof(double) - 1)) == 0);
1250  KMP_DEBUG_ASSERT((((kmp_uintptr_t)task) & (sizeof(double) - 1)) == 0);
1251 #else
1252  KMP_DEBUG_ASSERT((((kmp_uintptr_t)taskdata) & (sizeof(_Quad) - 1)) == 0);
1253  KMP_DEBUG_ASSERT((((kmp_uintptr_t)task) & (sizeof(_Quad) - 1)) == 0);
1254 #endif
1255  if (sizeof_shareds > 0) {
1256  // Avoid double allocation here by combining shareds with taskdata
1257  task->shareds = &((char *)taskdata)[shareds_offset];
1258  // Make sure shareds struct is aligned to pointer size
1259  KMP_DEBUG_ASSERT((((kmp_uintptr_t)task->shareds) & (sizeof(void *) - 1)) ==
1260  0);
1261  } else {
1262  task->shareds = NULL;
1263  }
1264  task->routine = task_entry;
1265  task->part_id = 0; // AC: Always start with 0 part id
1266 
1267  taskdata->td_task_id = KMP_GEN_TASK_ID();
1268  taskdata->td_team = team;
1269  taskdata->td_alloc_thread = thread;
1270  taskdata->td_parent = parent_task;
1271  taskdata->td_level = parent_task->td_level + 1; // increment nesting level
1272  KMP_ATOMIC_ST_RLX(&taskdata->td_untied_count, 0);
1273  taskdata->td_ident = loc_ref;
1274  taskdata->td_taskwait_ident = NULL;
1275  taskdata->td_taskwait_counter = 0;
1276  taskdata->td_taskwait_thread = 0;
1277  KMP_DEBUG_ASSERT(taskdata->td_parent != NULL);
1278 #if OMP_45_ENABLED
1279  // avoid copying icvs for proxy tasks
1280  if (flags->proxy == TASK_FULL)
1281 #endif
1282  copy_icvs(&taskdata->td_icvs, &taskdata->td_parent->td_icvs);
1283 
1284  taskdata->td_flags.tiedness = flags->tiedness;
1285  taskdata->td_flags.final = flags->final;
1286  taskdata->td_flags.merged_if0 = flags->merged_if0;
1287 #if OMP_40_ENABLED
1288  taskdata->td_flags.destructors_thunk = flags->destructors_thunk;
1289 #endif // OMP_40_ENABLED
1290 #if OMP_45_ENABLED
1291  taskdata->td_flags.proxy = flags->proxy;
1292  taskdata->td_task_team = thread->th.th_task_team;
1293  taskdata->td_size_alloc = shareds_offset + sizeof_shareds;
1294 #endif
1295  taskdata->td_flags.tasktype = TASK_EXPLICIT;
1296 
1297  // GEH - TODO: fix this to copy parent task's value of tasking_ser flag
1298  taskdata->td_flags.tasking_ser = (__kmp_tasking_mode == tskm_immediate_exec);
1299 
1300  // GEH - TODO: fix this to copy parent task's value of team_serial flag
1301  taskdata->td_flags.team_serial = (team->t.t_serialized) ? 1 : 0;
1302 
1303  // GEH - Note we serialize the task if the team is serialized to make sure
1304  // implicit parallel region tasks are not left until program termination to
1305  // execute. Also, it helps locality to execute immediately.
1306 
1307  taskdata->td_flags.task_serial =
1308  (parent_task->td_flags.final || taskdata->td_flags.team_serial ||
1309  taskdata->td_flags.tasking_ser);
1310 
1311  taskdata->td_flags.started = 0;
1312  taskdata->td_flags.executing = 0;
1313  taskdata->td_flags.complete = 0;
1314  taskdata->td_flags.freed = 0;
1315 
1316  taskdata->td_flags.native = flags->native;
1317 
1318  KMP_ATOMIC_ST_RLX(&taskdata->td_incomplete_child_tasks, 0);
1319  // start at one because counts current task and children
1320  KMP_ATOMIC_ST_RLX(&taskdata->td_allocated_child_tasks, 1);
1321 #if OMP_40_ENABLED
1322  taskdata->td_taskgroup =
1323  parent_task->td_taskgroup; // task inherits taskgroup from the parent task
1324  taskdata->td_dephash = NULL;
1325  taskdata->td_depnode = NULL;
1326 #endif
1327  if (flags->tiedness == TASK_UNTIED)
1328  taskdata->td_last_tied = NULL; // will be set when the task is scheduled
1329  else
1330  taskdata->td_last_tied = taskdata;
1331 
1332 #if OMPT_SUPPORT
1333  if (UNLIKELY(ompt_enabled.enabled))
1334  __ompt_task_init(taskdata, gtid);
1335 #endif
1336 // Only need to keep track of child task counts if team parallel and tasking not
1337 // serialized or if it is a proxy task
1338 #if OMP_45_ENABLED
1339  if (flags->proxy == TASK_PROXY ||
1340  !(taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser))
1341 #else
1342  if (!(taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser))
1343 #endif
1344  {
1345  KMP_ATOMIC_INC(&parent_task->td_incomplete_child_tasks);
1346 #if OMP_40_ENABLED
1347  if (parent_task->td_taskgroup)
1348  KMP_ATOMIC_INC(&parent_task->td_taskgroup->count);
1349 #endif
1350  // Only need to keep track of allocated child tasks for explicit tasks since
1351  // implicit not deallocated
1352  if (taskdata->td_parent->td_flags.tasktype == TASK_EXPLICIT) {
1353  KMP_ATOMIC_INC(&taskdata->td_parent->td_allocated_child_tasks);
1354  }
1355  }
1356 
1357  KA_TRACE(20, ("__kmp_task_alloc(exit): T#%d created task %p parent=%p\n",
1358  gtid, taskdata, taskdata->td_parent));
1359  ANNOTATE_HAPPENS_BEFORE(task);
1360 
1361  return task;
1362 }
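// Illustrative sketch (not part of the build): __kmp_task_alloc makes a single
// allocation that holds the runtime bookkeeping, the compiler-sized task
// structure with its private data, alignment padding, and finally the block of
// pointers to shared variables:
//
//   [ kmp_taskdata_t | kmp_task_t + privates | pad to sizeof(void*) | shareds ]
//     ^ taskdata       ^ task = KMP_TASKDATA_TO_TASK(taskdata)        ^ task->shareds
//
// The offset arithmetic, on hypothetical sizes:
#if 0
#include <cassert>
#include <cstddef>

int main() {
  const size_t sizeof_kmp_task_t = 44; // hypothetical compiler-provided size
  const size_t sizeof_shareds = 2 * sizeof(void *);

  size_t shareds_offset = sizeof(kmp_taskdata_t) + sizeof_kmp_task_t;
  shareds_offset = __kmp_round_up_to_val(shareds_offset, sizeof(void *));

  // One block of `shareds_offset + sizeof_shareds` bytes is allocated, and
  // task->shareds is set to &((char *)taskdata)[shareds_offset].
  assert(shareds_offset % sizeof(void *) == 0);
  return 0;
}
#endif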
1363 
1364 kmp_task_t *__kmpc_omp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
1365  kmp_int32 flags, size_t sizeof_kmp_task_t,
1366  size_t sizeof_shareds,
1367  kmp_routine_entry_t task_entry) {
1368  kmp_task_t *retval;
1369  kmp_tasking_flags_t *input_flags = (kmp_tasking_flags_t *)&flags;
1370 
1371  input_flags->native = FALSE;
1372 // __kmp_task_alloc() sets up all other runtime flags
1373 
1374 #if OMP_45_ENABLED
1375  KA_TRACE(10, ("__kmpc_omp_task_alloc(enter): T#%d loc=%p, flags=(%s %s) "
1376  "sizeof_task=%ld sizeof_shared=%ld entry=%p\n",
1377  gtid, loc_ref, input_flags->tiedness ? "tied " : "untied",
1378  input_flags->proxy ? "proxy" : "", sizeof_kmp_task_t,
1379  sizeof_shareds, task_entry));
1380 #else
1381  KA_TRACE(10, ("__kmpc_omp_task_alloc(enter): T#%d loc=%p, flags=(%s) "
1382  "sizeof_task=%ld sizeof_shared=%ld entry=%p\n",
1383  gtid, loc_ref, input_flags->tiedness ? "tied " : "untied",
1384  sizeof_kmp_task_t, sizeof_shareds, task_entry));
1385 #endif
1386 
1387  retval = __kmp_task_alloc(loc_ref, gtid, input_flags, sizeof_kmp_task_t,
1388  sizeof_shareds, task_entry);
1389 
1390  KA_TRACE(20, ("__kmpc_omp_task_alloc(exit): T#%d retval %p\n", gtid, retval));
1391 
1392  return retval;
1393 }
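// Illustrative sketch (not part of the build): for a construct such as
// `#pragma omp task shared(x)` a compiler typically emits a call to
// __kmpc_omp_task_alloc followed by __kmpc_omp_task (defined later in this
// file). The outlined entry point, the flag value, and the sizes below are
// hypothetical and only show the shape of the calling sequence.
#if 0
// Outlined task body the compiler would generate for the task region.
static kmp_int32 task_entry_sketch(kmp_int32 gtid, void *part) {
  kmp_task_t *task = (kmp_task_t *)part;
  int **shareds = (int **)task->shareds; // first (and only) shared: &x
  **shareds += 1;                        // the task region body
  return 0;
}

static void emit_task_sketch(ident_t *loc, kmp_int32 gtid, int *x_addr) {
  kmp_int32 flags = 1; // hypothetical encoding for a plain tied task
  kmp_task_t *t = __kmpc_omp_task_alloc(loc, gtid, flags, sizeof(kmp_task_t),
                                        sizeof(int *), &task_entry_sketch);
  ((int **)t->shareds)[0] = x_addr; // record the shared variable's address
  // __kmpc_omp_task(loc, gtid, t); // hand the task to the runtime
}
#endif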
1394 
1395 // __kmp_invoke_task: invoke the specified task
1396 //
1397 // gtid: global thread ID of caller
1398 // task: the task to invoke
1399 // current_task: the task to resume after task invocation
1400 static void __kmp_invoke_task(kmp_int32 gtid, kmp_task_t *task,
1401  kmp_taskdata_t *current_task) {
1402  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
1403  kmp_info_t *thread;
1404 #if OMP_40_ENABLED
1405  int discard = 0 /* false */;
1406 #endif
1407  KA_TRACE(
1408  30, ("__kmp_invoke_task(enter): T#%d invoking task %p, current_task=%p\n",
1409  gtid, taskdata, current_task));
1410  KMP_DEBUG_ASSERT(task);
1411 #if OMP_45_ENABLED
1412  if (taskdata->td_flags.proxy == TASK_PROXY &&
1413  taskdata->td_flags.complete == 1) {
1414  // This is a proxy task that was already completed but it needs to run
1415  // its bottom-half finish
1416  KA_TRACE(
1417  30,
1418  ("__kmp_invoke_task: T#%d running bottom finish for proxy task %p\n",
1419  gtid, taskdata));
1420 
1421  __kmp_bottom_half_finish_proxy(gtid, task);
1422 
1423  KA_TRACE(30, ("__kmp_invoke_task(exit): T#%d completed bottom finish for "
1424  "proxy task %p, resuming task %p\n",
1425  gtid, taskdata, current_task));
1426 
1427  return;
1428  }
1429 #endif
1430 
1431 #if OMPT_SUPPORT
1432  // For untied tasks, the first task executed only calls __kmpc_omp_task and
1433  // does not execute code.
1434  ompt_thread_info_t oldInfo;
1435  if (UNLIKELY(ompt_enabled.enabled)) {
1436  // Store the threads states and restore them after the task
1437  thread = __kmp_threads[gtid];
1438  oldInfo = thread->th.ompt_thread_info;
1439  thread->th.ompt_thread_info.wait_id = 0;
1440  thread->th.ompt_thread_info.state = (thread->th.th_team_serialized)
1441  ? omp_state_work_serial
1442  : omp_state_work_parallel;
1443  taskdata->ompt_task_info.frame.exit_frame = OMPT_GET_FRAME_ADDRESS(0);
1444  }
1445 #endif
1446 
1447 #if OMP_45_ENABLED
1448  // Proxy tasks are not handled by the runtime
1449  if (taskdata->td_flags.proxy != TASK_PROXY) {
1450 #endif
1451  ANNOTATE_HAPPENS_AFTER(task);
1452  __kmp_task_start(gtid, task, current_task); // OMPT only if not discarded
1453 #if OMP_45_ENABLED
1454  }
1455 #endif
1456 
1457 #if OMP_40_ENABLED
1458  // TODO: cancel tasks if the parallel region has also been cancelled
1459  // TODO: check if this sequence can be hoisted above __kmp_task_start
1460  // if cancellation has been enabled for this run ...
1461  if (__kmp_omp_cancellation) {
1462  thread = __kmp_threads[gtid];
1463  kmp_team_t *this_team = thread->th.th_team;
1464  kmp_taskgroup_t *taskgroup = taskdata->td_taskgroup;
1465  if ((taskgroup && taskgroup->cancel_request) ||
1466  (this_team->t.t_cancel_request == cancel_parallel)) {
1467 #if OMPT_SUPPORT && OMPT_OPTIONAL
1468  ompt_data_t *task_data;
1469  if (UNLIKELY(ompt_enabled.ompt_callback_cancel)) {
1470  __ompt_get_task_info_internal(0, NULL, &task_data, NULL, NULL, NULL);
1471  ompt_callbacks.ompt_callback(ompt_callback_cancel)(
1472  task_data,
1473  ((taskgroup && taskgroup->cancel_request) ? ompt_cancel_taskgroup
1474  : ompt_cancel_parallel) |
1475  ompt_cancel_discarded_task,
1476  NULL);
1477  }
1478 #endif
1479  KMP_COUNT_BLOCK(TASK_cancelled);
1480  // this task belongs to a task group and we need to cancel it
1481  discard = 1 /* true */;
1482  }
1483  }
1484 
1485  // Invoke the task routine and pass in relevant data.
1486  // Thunks generated by gcc take a different argument list.
1487  if (!discard) {
1488  if (taskdata->td_flags.tiedness == TASK_UNTIED) {
1489  taskdata->td_last_tied = current_task->td_last_tied;
1490  KMP_DEBUG_ASSERT(taskdata->td_last_tied);
1491  }
1492 #if KMP_STATS_ENABLED
1493  KMP_COUNT_BLOCK(TASK_executed);
1494  switch (KMP_GET_THREAD_STATE()) {
1495  case FORK_JOIN_BARRIER:
1496  KMP_PUSH_PARTITIONED_TIMER(OMP_task_join_bar);
1497  break;
1498  case PLAIN_BARRIER:
1499  KMP_PUSH_PARTITIONED_TIMER(OMP_task_plain_bar);
1500  break;
1501  case TASKYIELD:
1502  KMP_PUSH_PARTITIONED_TIMER(OMP_task_taskyield);
1503  break;
1504  case TASKWAIT:
1505  KMP_PUSH_PARTITIONED_TIMER(OMP_task_taskwait);
1506  break;
1507  case TASKGROUP:
1508  KMP_PUSH_PARTITIONED_TIMER(OMP_task_taskgroup);
1509  break;
1510  default:
1511  KMP_PUSH_PARTITIONED_TIMER(OMP_task_immediate);
1512  break;
1513  }
1514 #endif // KMP_STATS_ENABLED
1515 #endif // OMP_40_ENABLED
1516 
1517 // OMPT task begin
1518 #if OMPT_SUPPORT
1519  if (UNLIKELY(ompt_enabled.enabled))
1520  __ompt_task_start(task, current_task, gtid);
1521 #endif
1522 
1523 #if USE_ITT_BUILD && USE_ITT_NOTIFY
1524  kmp_uint64 cur_time;
1525  kmp_int32 kmp_itt_count_task =
1526  __kmp_forkjoin_frames_mode == 3 && !taskdata->td_flags.task_serial &&
1527  current_task->td_flags.tasktype == TASK_IMPLICIT;
1528  if (kmp_itt_count_task) {
1529  thread = __kmp_threads[gtid];
1530  // Time outer level explicit task on barrier for adjusting imbalance time
1531  if (thread->th.th_bar_arrive_time)
1532  cur_time = __itt_get_timestamp();
1533  else
1534  kmp_itt_count_task = 0; // thread is not on a barrier - skip timing
1535  }
1536 #endif
1537 
1538 #ifdef KMP_GOMP_COMPAT
1539  if (taskdata->td_flags.native) {
1540  ((void (*)(void *))(*(task->routine)))(task->shareds);
1541  } else
1542 #endif /* KMP_GOMP_COMPAT */
1543  {
1544  (*(task->routine))(gtid, task);
1545  }
1546  KMP_POP_PARTITIONED_TIMER();
1547 
1548 #if USE_ITT_BUILD && USE_ITT_NOTIFY
1549  if (kmp_itt_count_task) {
1550  // Barrier imbalance - adjust arrive time with the task duration
1551  thread->th.th_bar_arrive_time += (__itt_get_timestamp() - cur_time);
1552  }
1553 #endif
1554 
1555 #if OMP_40_ENABLED
1556  }
1557 #endif // OMP_40_ENABLED
1558 
1559 
1560 #if OMP_45_ENABLED
1561  // Proxy tasks are not handled by the runtime
1562  if (taskdata->td_flags.proxy != TASK_PROXY) {
1563 #endif
1564  ANNOTATE_HAPPENS_BEFORE(taskdata->td_parent);
1565 #if OMPT_SUPPORT
1566  if (UNLIKELY(ompt_enabled.enabled)) {
1567  thread->th.ompt_thread_info = oldInfo;
1568  if (taskdata->td_flags.tiedness == TASK_TIED) {
1569  taskdata->ompt_task_info.frame.exit_frame = NULL;
1570  }
1571  __kmp_task_finish<true>(gtid, task, current_task);
1572  } else
1573 #endif
1574  __kmp_task_finish<false>(gtid, task, current_task);
1575 #if OMP_45_ENABLED
1576  }
1577 #endif
1578 
1579  KA_TRACE(
1580  30,
1581  ("__kmp_invoke_task(exit): T#%d completed task %p, resuming task %p\n",
1582  gtid, taskdata, current_task));
1583  return;
1584 }
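
// Illustrative sketch (an assumption, not code generated by this file): the
// routine invoked above via (*(task->routine))(gtid, task) has the
// kmp_routine_entry_t shape. A hypothetical compiler-outlined task body
// might look like this; 'example_task_entry' and the shareds layout are
// invented for the example:
//
//   kmp_int32 example_task_entry(kmp_int32 gtid, void *arg) {
//     kmp_task_t *task = (kmp_task_t *)arg;
//     int *counter = (int *)((void **)task->shareds)[0]; // captured shared
//     (*counter)++;                                      // the task body
//     return 0;
//   }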
1585 
1586 // __kmpc_omp_task_parts: Schedule a thread-switchable task for execution
1587 //
1588 // loc_ref: location of original task pragma (ignored)
1589 // gtid: Global Thread ID of encountering thread
1590 // new_task: task thunk allocated by __kmp_omp_task_alloc() for the "new task"
1591 // Returns:
1592 // TASK_CURRENT_NOT_QUEUED (0) if the current task was not suspended and
1593 // queued to be resumed later.
1594 // TASK_CURRENT_QUEUED (1) if the current task was suspended and queued to
1595 // be resumed later.
1596 kmp_int32 __kmpc_omp_task_parts(ident_t *loc_ref, kmp_int32 gtid,
1597  kmp_task_t *new_task) {
1598  kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
1599 
1600  KA_TRACE(10, ("__kmpc_omp_task_parts(enter): T#%d loc=%p task=%p\n", gtid,
1601  loc_ref, new_taskdata));
1602 
1603 #if OMPT_SUPPORT
1604  kmp_taskdata_t *parent;
1605  if (UNLIKELY(ompt_enabled.enabled)) {
1606  parent = new_taskdata->td_parent;
1607  if (ompt_enabled.ompt_callback_task_create) {
1608  ompt_data_t task_data = ompt_data_none;
1609  ompt_callbacks.ompt_callback(ompt_callback_task_create)(
1610  parent ? &(parent->ompt_task_info.task_data) : &task_data,
1611  parent ? &(parent->ompt_task_info.frame) : NULL,
1612  &(new_taskdata->ompt_task_info.task_data), ompt_task_explicit, 0,
1613  OMPT_GET_RETURN_ADDRESS(0));
1614  }
1615  }
1616 #endif
1617 
1618  /* Should we execute the new task or queue it? For now, let's just always try
1619  to queue it. If the queue fills up, then we'll execute it. */
1620 
1621  if (__kmp_push_task(gtid, new_task) == TASK_NOT_PUSHED) // if cannot defer
1622  { // Execute this task immediately
1623  kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
1624  new_taskdata->td_flags.task_serial = 1;
1625  __kmp_invoke_task(gtid, new_task, current_task);
1626  }
1627 
1628  KA_TRACE(
1629  10,
1630  ("__kmpc_omp_task_parts(exit): T#%d returning TASK_CURRENT_NOT_QUEUED: "
1631  "loc=%p task=%p, return: TASK_CURRENT_NOT_QUEUED\n",
1632  gtid, loc_ref, new_taskdata));
1633 
1634  ANNOTATE_HAPPENS_BEFORE(new_task);
1635 #if OMPT_SUPPORT
1636  if (UNLIKELY(ompt_enabled.enabled)) {
1637  parent->ompt_task_info.frame.enter_frame = NULL;
1638  }
1639 #endif
1640  return TASK_CURRENT_NOT_QUEUED;
1641 }
1642 
1643 // __kmp_omp_task: Schedule a non-thread-switchable task for execution
1644 //
1645 // gtid: Global Thread ID of encountering thread
1646 // new_task: non-thread-switchable task thunk allocated by __kmp_omp_task_alloc()
1647 // serialize_immediate: if TRUE and the task is executed immediately, its
1648 // execution will be serialized
1649 // Returns:
1650 // TASK_CURRENT_NOT_QUEUED (0) if the current task was not suspended and
1651 // queued to be resumed later.
1652 // TASK_CURRENT_QUEUED (1) if the current task was suspended and queued to
1653 // be resumed later.
1654 kmp_int32 __kmp_omp_task(kmp_int32 gtid, kmp_task_t *new_task,
1655  bool serialize_immediate) {
1656  kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
1657 
1658 /* Should we execute the new task or queue it? For now, let's just always try to
1659  queue it. If the queue fills up, then we'll execute it. */
1660 #if OMP_45_ENABLED
1661  if (new_taskdata->td_flags.proxy == TASK_PROXY ||
1662  __kmp_push_task(gtid, new_task) == TASK_NOT_PUSHED) // if cannot defer
1663 #else
1664  if (__kmp_push_task(gtid, new_task) == TASK_NOT_PUSHED) // if cannot defer
1665 #endif
1666  { // Execute this task immediately
1667  kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
1668  if (serialize_immediate)
1669  new_taskdata->td_flags.task_serial = 1;
1670  __kmp_invoke_task(gtid, new_task, current_task);
1671  }
1672 
1673  ANNOTATE_HAPPENS_BEFORE(new_task);
1674  return TASK_CURRENT_NOT_QUEUED;
1675 }
1676 
1677 // __kmpc_omp_task: Wrapper around __kmp_omp_task to schedule a
1678 // non-thread-switchable task from the parent thread only!
1679 //
1680 // loc_ref: location of original task pragma (ignored)
1681 // gtid: Global Thread ID of encountering thread
1682 // new_task: non-thread-switchable task thunk allocated by
1683 // __kmp_omp_task_alloc()
1684 // Returns:
1685 // TASK_CURRENT_NOT_QUEUED (0) if the current task was not suspended and
1686 // queued to be resumed later.
1687 // TASK_CURRENT_QUEUED (1) if the current task was suspended and queued to
1688 // be resumed later.
1689 kmp_int32 __kmpc_omp_task(ident_t *loc_ref, kmp_int32 gtid,
1690  kmp_task_t *new_task) {
1691  kmp_int32 res;
1692  KMP_SET_THREAD_STATE_BLOCK(EXPLICIT_TASK);
1693 
1694 #if KMP_DEBUG || OMPT_SUPPORT
1695  kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
1696 #endif
1697  KA_TRACE(10, ("__kmpc_omp_task(enter): T#%d loc=%p task=%p\n", gtid, loc_ref,
1698  new_taskdata));
1699 
1700 #if OMPT_SUPPORT
1701  kmp_taskdata_t *parent = NULL;
1702  if (UNLIKELY(ompt_enabled.enabled)) {
1703  if (!new_taskdata->td_flags.started) {
1704  OMPT_STORE_RETURN_ADDRESS(gtid);
1705  parent = new_taskdata->td_parent;
1706  if (!parent->ompt_task_info.frame.enter_frame) {
1707  parent->ompt_task_info.frame.enter_frame = OMPT_GET_FRAME_ADDRESS(1);
1708  }
1709  if (ompt_enabled.ompt_callback_task_create) {
1710  ompt_data_t task_data = ompt_data_none;
1711  ompt_callbacks.ompt_callback(ompt_callback_task_create)(
1712  parent ? &(parent->ompt_task_info.task_data) : &task_data,
1713  parent ? &(parent->ompt_task_info.frame) : NULL,
1714  &(new_taskdata->ompt_task_info.task_data),
1715  ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(new_taskdata), 0,
1716  OMPT_LOAD_RETURN_ADDRESS(gtid));
1717  }
1718  } else {
1719  // We are scheduling the continuation of an UNTIED task.
1720  // Scheduling back to the parent task.
1721  __ompt_task_finish(new_task,
1722  new_taskdata->ompt_task_info.scheduling_parent,
1723  ompt_task_switch);
1724  new_taskdata->ompt_task_info.frame.exit_frame = NULL;
1725  }
1726  }
1727 #endif
1728 
1729  res = __kmp_omp_task(gtid, new_task, true);
1730 
1731  KA_TRACE(10, ("__kmpc_omp_task(exit): T#%d returning "
1732  "TASK_CURRENT_NOT_QUEUED: loc=%p task=%p\n",
1733  gtid, loc_ref, new_taskdata));
1734 #if OMPT_SUPPORT
1735  if (UNLIKELY(ompt_enabled.enabled && parent != NULL)) {
1736  parent->ompt_task_info.frame.enter_frame = NULL;
1737  }
1738 #endif
1739  return res;
1740 }
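
// Illustrative sketch (assumed lowering, with invented names): a
// "#pragma omp task shared(x)" region is typically lowered into an
// allocation call followed by this entry point:
//
//   kmp_int32 gtid = __kmpc_global_thread_num(&loc);
//   kmp_task_t *t = __kmpc_omp_task_alloc(&loc, gtid, 1 /* tied */,
//                                         sizeof(kmp_task_t), sizeof(void *),
//                                         (kmp_routine_entry_t)&task_entry);
//   ((void **)t->shareds)[0] = &x;   // capture the shared variable
//   __kmpc_omp_task(&loc, gtid, t);  // defer, or run immediately if the
//                                    // deque is full or tasking is serialized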
1741 
1742 // __kmp_omp_taskloop_task: Wrapper around __kmp_omp_task to schedule
1743 // a taskloop task with the correct OMPT return address
1744 //
1745 // loc_ref: location of original task pragma (ignored)
1746 // gtid: Global Thread ID of encountering thread
1747 // new_task: non-thread-switchable task thunk allocated by
1748 // __kmp_omp_task_alloc()
1749 // codeptr_ra: return address for OMPT callback
1750 // Returns:
1751 // TASK_CURRENT_NOT_QUEUED (0) if the current task was not suspended and
1752 // queued to be resumed later.
1753 // TASK_CURRENT_QUEUED (1) if the current task was suspended and queued to
1754 // be resumed later.
1755 kmp_int32 __kmp_omp_taskloop_task(ident_t *loc_ref, kmp_int32 gtid,
1756  kmp_task_t *new_task, void *codeptr_ra) {
1757  kmp_int32 res;
1758  KMP_SET_THREAD_STATE_BLOCK(EXPLICIT_TASK);
1759 
1760 #if KMP_DEBUG || OMPT_SUPPORT
1761  kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
1762 #endif
1763  KA_TRACE(10, ("__kmp_omp_taskloop_task(enter): T#%d loc=%p task=%p\n",
1764  gtid, loc_ref, new_taskdata));
1765 
1766 #if OMPT_SUPPORT
1767  kmp_taskdata_t *parent = NULL;
1768  if (UNLIKELY(ompt_enabled.enabled && !new_taskdata->td_flags.started)) {
1769  parent = new_taskdata->td_parent;
1770  if (!parent->ompt_task_info.frame.enter_frame)
1771  parent->ompt_task_info.frame.enter_frame = OMPT_GET_FRAME_ADDRESS(1);
1772  if (ompt_enabled.ompt_callback_task_create) {
1773  ompt_data_t task_data = ompt_data_none;
1774  ompt_callbacks.ompt_callback(ompt_callback_task_create)(
1775  parent ? &(parent->ompt_task_info.task_data) : &task_data,
1776  parent ? &(parent->ompt_task_info.frame) : NULL,
1777  &(new_taskdata->ompt_task_info.task_data),
1778  ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(new_taskdata), 0,
1779  codeptr_ra);
1780  }
1781  }
1782 #endif
1783 
1784  res = __kmp_omp_task(gtid, new_task, true);
1785 
1786  KA_TRACE(10, ("__kmp_omp_taskloop_task(exit): T#%d returning "
1787  "TASK_CURRENT_NOT_QUEUED: loc=%p task=%p\n",
1788  gtid, loc_ref, new_taskdata));
1789 #if OMPT_SUPPORT
1790  if (UNLIKELY(ompt_enabled.enabled && parent != NULL)) {
1791  parent->ompt_task_info.frame.enter_frame = NULL;
1792  }
1793 #endif
1794  return res;
1795 }
1796 
1797 template <bool ompt>
1798 static kmp_int32 __kmpc_omp_taskwait_template(ident_t *loc_ref, kmp_int32 gtid,
1799  void *frame_address,
1800  void *return_address) {
1801  kmp_taskdata_t *taskdata;
1802  kmp_info_t *thread;
1803  int thread_finished = FALSE;
1804  KMP_SET_THREAD_STATE_BLOCK(TASKWAIT);
1805 
1806  KA_TRACE(10, ("__kmpc_omp_taskwait(enter): T#%d loc=%p\n", gtid, loc_ref));
1807 
1808  if (__kmp_tasking_mode != tskm_immediate_exec) {
1809  thread = __kmp_threads[gtid];
1810  taskdata = thread->th.th_current_task;
1811 
1812 #if OMPT_SUPPORT && OMPT_OPTIONAL
1813  ompt_data_t *my_task_data;
1814  ompt_data_t *my_parallel_data;
1815 
1816  if (ompt) {
1817  my_task_data = &(taskdata->ompt_task_info.task_data);
1818  my_parallel_data = OMPT_CUR_TEAM_DATA(thread);
1819 
1820  taskdata->ompt_task_info.frame.enter_frame = frame_address;
1821 
1822  if (ompt_enabled.ompt_callback_sync_region) {
1823  ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
1824  ompt_sync_region_taskwait, ompt_scope_begin, my_parallel_data,
1825  my_task_data, return_address);
1826  }
1827 
1828  if (ompt_enabled.ompt_callback_sync_region_wait) {
1829  ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
1830  ompt_sync_region_taskwait, ompt_scope_begin, my_parallel_data,
1831  my_task_data, return_address);
1832  }
1833  }
1834 #endif // OMPT_SUPPORT && OMPT_OPTIONAL
1835 
1836 // Debugger: The taskwait is active. Store the location and the thread that
1837 // encountered the taskwait.
1838 #if USE_ITT_BUILD
1839 // Note: These values are used by ITT events as well.
1840 #endif /* USE_ITT_BUILD */
1841  taskdata->td_taskwait_counter += 1;
1842  taskdata->td_taskwait_ident = loc_ref;
1843  taskdata->td_taskwait_thread = gtid + 1;
1844 
1845 #if USE_ITT_BUILD
1846  void *itt_sync_obj = __kmp_itt_taskwait_object(gtid);
1847  if (itt_sync_obj != NULL)
1848  __kmp_itt_taskwait_starting(gtid, itt_sync_obj);
1849 #endif /* USE_ITT_BUILD */
1850 
1851  bool must_wait =
1852  !taskdata->td_flags.team_serial && !taskdata->td_flags.final;
1853 
1854 #if OMP_45_ENABLED
1855  must_wait = must_wait || (thread->th.th_task_team != NULL &&
1856  thread->th.th_task_team->tt.tt_found_proxy_tasks);
1857 #endif
1858  if (must_wait) {
1859  kmp_flag_32 flag(RCAST(std::atomic<kmp_uint32> *,
1860  &(taskdata->td_incomplete_child_tasks)),
1861  0U);
1862  while (KMP_ATOMIC_LD_ACQ(&taskdata->td_incomplete_child_tasks) != 0) {
1863  flag.execute_tasks(thread, gtid, FALSE,
1864  &thread_finished USE_ITT_BUILD_ARG(itt_sync_obj),
1865  __kmp_task_stealing_constraint);
1866  }
1867  }
1868 #if USE_ITT_BUILD
1869  if (itt_sync_obj != NULL)
1870  __kmp_itt_taskwait_finished(gtid, itt_sync_obj);
1871 #endif /* USE_ITT_BUILD */
1872 
1873  // Debugger: The taskwait is completed. Location remains, but thread is
1874  // negated.
1875  taskdata->td_taskwait_thread = -taskdata->td_taskwait_thread;
1876 
1877 #if OMPT_SUPPORT && OMPT_OPTIONAL
1878  if (ompt) {
1879  if (ompt_enabled.ompt_callback_sync_region_wait) {
1880  ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
1881  ompt_sync_region_taskwait, ompt_scope_end, my_parallel_data,
1882  my_task_data, return_address);
1883  }
1884  if (ompt_enabled.ompt_callback_sync_region) {
1885  ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
1886  ompt_sync_region_taskwait, ompt_scope_end, my_parallel_data,
1887  my_task_data, return_address);
1888  }
1889  taskdata->ompt_task_info.frame.enter_frame = NULL;
1890  }
1891 #endif // OMPT_SUPPORT && OMPT_OPTIONAL
1892 
1893  ANNOTATE_HAPPENS_AFTER(taskdata);
1894  }
1895 
1896  KA_TRACE(10, ("__kmpc_omp_taskwait(exit): T#%d task %p finished waiting, "
1897  "returning TASK_CURRENT_NOT_QUEUED\n",
1898  gtid, taskdata));
1899 
1900  return TASK_CURRENT_NOT_QUEUED;
1901 }
1902 
1903 #if OMPT_SUPPORT && OMPT_OPTIONAL
1904 OMPT_NOINLINE
1905 static kmp_int32 __kmpc_omp_taskwait_ompt(ident_t *loc_ref, kmp_int32 gtid,
1906  void *frame_address,
1907  void *return_address) {
1908  return __kmpc_omp_taskwait_template<true>(loc_ref, gtid, frame_address,
1909  return_address);
1910 }
1911 #endif // OMPT_SUPPORT && OMPT_OPTIONAL
1912 
1913 // __kmpc_omp_taskwait: Wait until all tasks generated by the current task are
1914 // complete
1915 kmp_int32 __kmpc_omp_taskwait(ident_t *loc_ref, kmp_int32 gtid) {
1916 #if OMPT_SUPPORT && OMPT_OPTIONAL
1917  if (UNLIKELY(ompt_enabled.enabled)) {
1918  OMPT_STORE_RETURN_ADDRESS(gtid);
1919  return __kmpc_omp_taskwait_ompt(loc_ref, gtid, OMPT_GET_FRAME_ADDRESS(1),
1920  OMPT_LOAD_RETURN_ADDRESS(gtid));
1921  }
1922 #endif
1923  return __kmpc_omp_taskwait_template<false>(loc_ref, gtid, NULL, NULL);
1924 }
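
// Illustrative sketch (assumed lowering): a bare "#pragma omp taskwait"
// typically becomes a single call to the entry point above, with 'loc'
// describing the construct's source location:
//
//   kmp_int32 gtid = __kmpc_global_thread_num(&loc);
//   __kmpc_omp_taskwait(&loc, gtid); // returns TASK_CURRENT_NOT_QUEUED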
1925 
1926 // __kmpc_omp_taskyield: switch to a different task
1927 kmp_int32 __kmpc_omp_taskyield(ident_t *loc_ref, kmp_int32 gtid, int end_part) {
1928  kmp_taskdata_t *taskdata;
1929  kmp_info_t *thread;
1930  int thread_finished = FALSE;
1931 
1932  KMP_COUNT_BLOCK(OMP_TASKYIELD);
1933  KMP_SET_THREAD_STATE_BLOCK(TASKYIELD);
1934 
1935  KA_TRACE(10, ("__kmpc_omp_taskyield(enter): T#%d loc=%p end_part = %d\n",
1936  gtid, loc_ref, end_part));
1937 
1938  if (__kmp_tasking_mode != tskm_immediate_exec && __kmp_init_parallel) {
1939  thread = __kmp_threads[gtid];
1940  taskdata = thread->th.th_current_task;
1941 // Should we model this as a task wait or not?
1942 // Debugger: The taskwait is active. Store the location and the thread that
1943 // encountered the taskwait.
1944 #if USE_ITT_BUILD
1945 // Note: These values are used by ITT events as well.
1946 #endif /* USE_ITT_BUILD */
1947  taskdata->td_taskwait_counter += 1;
1948  taskdata->td_taskwait_ident = loc_ref;
1949  taskdata->td_taskwait_thread = gtid + 1;
1950 
1951 #if USE_ITT_BUILD
1952  void *itt_sync_obj = __kmp_itt_taskwait_object(gtid);
1953  if (itt_sync_obj != NULL)
1954  __kmp_itt_taskwait_starting(gtid, itt_sync_obj);
1955 #endif /* USE_ITT_BUILD */
1956  if (!taskdata->td_flags.team_serial) {
1957  kmp_task_team_t *task_team = thread->th.th_task_team;
1958  if (task_team != NULL) {
1959  if (KMP_TASKING_ENABLED(task_team)) {
1960 #if OMPT_SUPPORT
1961  if (UNLIKELY(ompt_enabled.enabled))
1962  thread->th.ompt_thread_info.ompt_task_yielded = 1;
1963 #endif
1964  __kmp_execute_tasks_32(
1965  thread, gtid, NULL, FALSE,
1966  &thread_finished USE_ITT_BUILD_ARG(itt_sync_obj),
1967  __kmp_task_stealing_constraint);
1968 #if OMPT_SUPPORT
1969  if (UNLIKELY(ompt_enabled.enabled))
1970  thread->th.ompt_thread_info.ompt_task_yielded = 0;
1971 #endif
1972  }
1973  }
1974  }
1975 #if USE_ITT_BUILD
1976  if (itt_sync_obj != NULL)
1977  __kmp_itt_taskwait_finished(gtid, itt_sync_obj);
1978 #endif /* USE_ITT_BUILD */
1979 
1980  // Debugger: The taskwait is completed. Location remains, but thread is
1981  // negated.
1982  taskdata->td_taskwait_thread = -taskdata->td_taskwait_thread;
1983  }
1984 
1985  KA_TRACE(10, ("__kmpc_omp_taskyield(exit): T#%d task %p resuming, "
1986  "returning TASK_CURRENT_NOT_QUEUED\n",
1987  gtid, taskdata));
1988 
1989  return TASK_CURRENT_NOT_QUEUED;
1990 }
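
// Illustrative sketch (assumed lowering): "#pragma omp taskyield" maps to a
// single call such as the one below; end_part is only traced, not otherwise
// used, in the body above, so 0 is a reasonable value for the sketch:
//
//   __kmpc_omp_taskyield(&loc, __kmpc_global_thread_num(&loc), 0);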
1991 
1992 #if OMP_50_ENABLED
1993 // Task Reduction implementation
1994 
1995 typedef struct kmp_task_red_flags {
1996  unsigned lazy_priv : 1; // hint: (1) use lazy allocation (big objects)
1997  unsigned reserved31 : 31;
1998 } kmp_task_red_flags_t;
1999 
2000 // internal structure for reduction data item related info
2001 typedef struct kmp_task_red_data {
2002  void *reduce_shar; // shared reduction item
2003  size_t reduce_size; // size of data item
2004  void *reduce_priv; // thread specific data
2005  void *reduce_pend; // end of private data for comparison op
2006  void *reduce_init; // data initialization routine
2007  void *reduce_fini; // data finalization routine
2008  void *reduce_comb; // data combiner routine
2009  kmp_task_red_flags_t flags; // flags for additional info from compiler
2010 } kmp_task_red_data_t;
2011 
2012 // structure sent to us by the compiler - one per reduction item
2013 typedef struct kmp_task_red_input {
2014  void *reduce_shar; // shared reduction item
2015  size_t reduce_size; // size of data item
2016  void *reduce_init; // data initialization routine
2017  void *reduce_fini; // data finalization routine
2018  void *reduce_comb; // data combiner routine
2019  kmp_task_red_flags_t flags; // flags for additional info from compiler
2020 } kmp_task_red_input_t;
2021 
2031 void *__kmpc_task_reduction_init(int gtid, int num, void *data) {
2032  kmp_info_t *thread = __kmp_threads[gtid];
2033  kmp_taskgroup_t *tg = thread->th.th_current_task->td_taskgroup;
2034  kmp_int32 nth = thread->th.th_team_nproc;
2035  kmp_task_red_input_t *input = (kmp_task_red_input_t *)data;
2036  kmp_task_red_data_t *arr;
2037 
2038  // check input data just in case
2039  KMP_ASSERT(tg != NULL);
2040  KMP_ASSERT(data != NULL);
2041  KMP_ASSERT(num > 0);
2042  if (nth == 1) {
2043  KA_TRACE(10, ("__kmpc_task_reduction_init: T#%d, tg %p, exiting nth=1\n",
2044  gtid, tg));
2045  return (void *)tg;
2046  }
2047  KA_TRACE(10, ("__kmpc_task_reduction_init: T#%d, taskgroup %p, #items %d\n",
2048  gtid, tg, num));
2049  arr = (kmp_task_red_data_t *)__kmp_thread_malloc(
2050  thread, num * sizeof(kmp_task_red_data_t));
2051  for (int i = 0; i < num; ++i) {
2052  void (*f_init)(void *) = (void (*)(void *))(input[i].reduce_init);
2053  size_t size = input[i].reduce_size - 1;
2054  // round the size up to cache line per thread-specific item
2055  size += CACHE_LINE - size % CACHE_LINE;
2056  KMP_ASSERT(input[i].reduce_comb != NULL); // combiner is mandatory
2057  arr[i].reduce_shar = input[i].reduce_shar;
2058  arr[i].reduce_size = size;
2059  arr[i].reduce_init = input[i].reduce_init;
2060  arr[i].reduce_fini = input[i].reduce_fini;
2061  arr[i].reduce_comb = input[i].reduce_comb;
2062  arr[i].flags = input[i].flags;
2063  if (!input[i].flags.lazy_priv) {
2064  // allocate cache-line aligned block and fill it with zeros
2065  arr[i].reduce_priv = __kmp_allocate(nth * size);
2066  arr[i].reduce_pend = (char *)(arr[i].reduce_priv) + nth * size;
2067  if (f_init != NULL) {
2068  // initialize thread-specific items
2069  for (int j = 0; j < nth; ++j) {
2070  f_init((char *)(arr[i].reduce_priv) + j * size);
2071  }
2072  }
2073  } else {
2074  // only allocate space for pointers now,
2075  // objects will be lazily allocated/initialized once requested
2076  arr[i].reduce_priv = __kmp_allocate(nth * sizeof(void *));
2077  }
2078  }
2079  tg->reduce_data = (void *)arr;
2080  tg->reduce_num_data = num;
2081  return (void *)tg;
2082 }
2083 
2093 void *__kmpc_task_reduction_get_th_data(int gtid, void *tskgrp, void *data) {
2094  kmp_info_t *thread = __kmp_threads[gtid];
2095  kmp_int32 nth = thread->th.th_team_nproc;
2096  if (nth == 1)
2097  return data; // nothing to do
2098 
2099  kmp_taskgroup_t *tg = (kmp_taskgroup_t *)tskgrp;
2100  if (tg == NULL)
2101  tg = thread->th.th_current_task->td_taskgroup;
2102  KMP_ASSERT(tg != NULL);
2103  kmp_task_red_data_t *arr = (kmp_task_red_data_t *)(tg->reduce_data);
2104  kmp_int32 num = tg->reduce_num_data;
2105  kmp_int32 tid = thread->th.th_info.ds.ds_tid;
2106 
2107  KMP_ASSERT(data != NULL);
2108  while (tg != NULL) {
2109  for (int i = 0; i < num; ++i) {
2110  if (!arr[i].flags.lazy_priv) {
2111  if (data == arr[i].reduce_shar ||
2112  (data >= arr[i].reduce_priv && data < arr[i].reduce_pend))
2113  return (char *)(arr[i].reduce_priv) + tid * arr[i].reduce_size;
2114  } else {
2115  // check shared location first
2116  void **p_priv = (void **)(arr[i].reduce_priv);
2117  if (data == arr[i].reduce_shar)
2118  goto found;
2119  // check if we got some thread-specific location as the parameter
2120  for (int j = 0; j < nth; ++j)
2121  if (data == p_priv[j])
2122  goto found;
2123  continue; // not found, continue search
2124  found:
2125  if (p_priv[tid] == NULL) {
2126  // allocate thread specific object lazily
2127  void (*f_init)(void *) = (void (*)(void *))(arr[i].reduce_init);
2128  p_priv[tid] = __kmp_allocate(arr[i].reduce_size);
2129  if (f_init != NULL) {
2130  f_init(p_priv[tid]);
2131  }
2132  }
2133  return p_priv[tid];
2134  }
2135  }
2136  tg = tg->parent;
2137  arr = (kmp_task_red_data_t *)(tg->reduce_data);
2138  num = tg->reduce_num_data;
2139  }
2140  KMP_ASSERT2(0, "Unknown task reduction item");
2141  return NULL; // ERROR, this line never executed
2142 }
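
// Illustrative sketch (an assumption about how a compiler might drive the
// two entry points above; 'x', 'x_init' and 'x_comb' are invented names):
// for task_reduction(+:x) on a taskgroup, the encountering thread fills one
// kmp_task_red_input_t per reduction item and calls the init routine, and
// each participating task asks for its private copy:
//
//   kmp_task_red_input_t in = {};
//   in.reduce_shar = &x;
//   in.reduce_size = sizeof(x);
//   in.reduce_init = (void *)&x_init;  // void x_init(void *priv)
//   in.reduce_fini = NULL;             // no finalizer needed for an int
//   in.reduce_comb = (void *)&x_comb;  // void x_comb(void *shar, void *priv)
//   in.flags.lazy_priv = 0;            // eager per-thread allocation
//   void *tg = __kmpc_task_reduction_init(gtid, 1, &in);
//
//   // ... later, inside a task that is in_reduction(+:x):
//   int *priv_x = (int *)__kmpc_task_reduction_get_th_data(gtid, tg, &x);
//   *priv_x += partial_result;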
2143 
2144 // Finalize task reduction.
2145 // Called from __kmpc_end_taskgroup()
2146 static void __kmp_task_reduction_fini(kmp_info_t *th, kmp_taskgroup_t *tg) {
2147  kmp_int32 nth = th->th.th_team_nproc;
2148  KMP_DEBUG_ASSERT(nth > 1); // should not be called if nth == 1
2149  kmp_task_red_data_t *arr = (kmp_task_red_data_t *)tg->reduce_data;
2150  kmp_int32 num = tg->reduce_num_data;
2151  for (int i = 0; i < num; ++i) {
2152  void *sh_data = arr[i].reduce_shar;
2153  void (*f_fini)(void *) = (void (*)(void *))(arr[i].reduce_fini);
2154  void (*f_comb)(void *, void *) =
2155  (void (*)(void *, void *))(arr[i].reduce_comb);
2156  if (!arr[i].flags.lazy_priv) {
2157  void *pr_data = arr[i].reduce_priv;
2158  size_t size = arr[i].reduce_size;
2159  for (int j = 0; j < nth; ++j) {
2160  void *priv_data = (char *)pr_data + j * size;
2161  f_comb(sh_data, priv_data); // combine results
2162  if (f_fini)
2163  f_fini(priv_data); // finalize if needed
2164  }
2165  } else {
2166  void **pr_data = (void **)(arr[i].reduce_priv);
2167  for (int j = 0; j < nth; ++j) {
2168  if (pr_data[j] != NULL) {
2169  f_comb(sh_data, pr_data[j]); // combine results
2170  if (f_fini)
2171  f_fini(pr_data[j]); // finalize if needed
2172  __kmp_free(pr_data[j]);
2173  }
2174  }
2175  }
2176  __kmp_free(arr[i].reduce_priv);
2177  }
2178  __kmp_thread_free(th, arr);
2179  tg->reduce_data = NULL;
2180  tg->reduce_num_data = 0;
2181 }
2182 #endif
2183 
2184 #if OMP_40_ENABLED
2185 // __kmpc_taskgroup: Start a new taskgroup
2186 void __kmpc_taskgroup(ident_t *loc, int gtid) {
2187  kmp_info_t *thread = __kmp_threads[gtid];
2188  kmp_taskdata_t *taskdata = thread->th.th_current_task;
2189  kmp_taskgroup_t *tg_new =
2190  (kmp_taskgroup_t *)__kmp_thread_malloc(thread, sizeof(kmp_taskgroup_t));
2191  KA_TRACE(10, ("__kmpc_taskgroup: T#%d loc=%p group=%p\n", gtid, loc, tg_new));
2192  KMP_ATOMIC_ST_RLX(&tg_new->count, 0);
2193  KMP_ATOMIC_ST_RLX(&tg_new->cancel_request, cancel_noreq);
2194  tg_new->parent = taskdata->td_taskgroup;
2195 #if OMP_50_ENABLED
2196  tg_new->reduce_data = NULL;
2197  tg_new->reduce_num_data = 0;
2198 #endif
2199  taskdata->td_taskgroup = tg_new;
2200 
2201 #if OMPT_SUPPORT && OMPT_OPTIONAL
2202  if (UNLIKELY(ompt_enabled.ompt_callback_sync_region)) {
2203  void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
2204  if (!codeptr)
2205  codeptr = OMPT_GET_RETURN_ADDRESS(0);
2206  kmp_team_t *team = thread->th.th_team;
2207  ompt_data_t my_task_data = taskdata->ompt_task_info.task_data;
2208  // FIXME: I think this is wrong for lwt!
2209  ompt_data_t my_parallel_data = team->t.ompt_team_info.parallel_data;
2210 
2211  ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
2212  ompt_sync_region_taskgroup, ompt_scope_begin, &(my_parallel_data),
2213  &(my_task_data), codeptr);
2214  }
2215 #endif
2216 }
2217 
2218 // __kmpc_end_taskgroup: Wait until all tasks generated by the current task
2219 // and its descendants are complete
2220 void __kmpc_end_taskgroup(ident_t *loc, int gtid) {
2221  kmp_info_t *thread = __kmp_threads[gtid];
2222  kmp_taskdata_t *taskdata = thread->th.th_current_task;
2223  kmp_taskgroup_t *taskgroup = taskdata->td_taskgroup;
2224  int thread_finished = FALSE;
2225 
2226 #if OMPT_SUPPORT && OMPT_OPTIONAL
2227  kmp_team_t *team;
2228  ompt_data_t my_task_data;
2229  ompt_data_t my_parallel_data;
2230  void *codeptr;
2231  if (UNLIKELY(ompt_enabled.enabled)) {
2232  team = thread->th.th_team;
2233  my_task_data = taskdata->ompt_task_info.task_data;
2234  // FIXME: I think this is wrong for lwt!
2235  my_parallel_data = team->t.ompt_team_info.parallel_data;
2236  codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
2237  if (!codeptr)
2238  codeptr = OMPT_GET_RETURN_ADDRESS(0);
2239  }
2240 #endif
2241 
2242  KA_TRACE(10, ("__kmpc_end_taskgroup(enter): T#%d loc=%p\n", gtid, loc));
2243  KMP_DEBUG_ASSERT(taskgroup != NULL);
2244  KMP_SET_THREAD_STATE_BLOCK(TASKGROUP);
2245 
2246  if (__kmp_tasking_mode != tskm_immediate_exec) {
2247  // mark the task as waiting (this wait is not at a barrier)
2248  taskdata->td_taskwait_counter += 1;
2249  taskdata->td_taskwait_ident = loc;
2250  taskdata->td_taskwait_thread = gtid + 1;
2251 #if USE_ITT_BUILD
2252  // For ITT the taskgroup wait is similar to taskwait until we need to
2253  // distinguish them
2254  void *itt_sync_obj = __kmp_itt_taskwait_object(gtid);
2255  if (itt_sync_obj != NULL)
2256  __kmp_itt_taskwait_starting(gtid, itt_sync_obj);
2257 #endif /* USE_ITT_BUILD */
2258 
2259 #if OMPT_SUPPORT && OMPT_OPTIONAL
2260  if (UNLIKELY(ompt_enabled.ompt_callback_sync_region_wait)) {
2261  ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
2262  ompt_sync_region_taskgroup, ompt_scope_begin, &(my_parallel_data),
2263  &(my_task_data), codeptr);
2264  }
2265 #endif
2266 
2267 #if OMP_45_ENABLED
2268  if (!taskdata->td_flags.team_serial ||
2269  (thread->th.th_task_team != NULL &&
2270  thread->th.th_task_team->tt.tt_found_proxy_tasks))
2271 #else
2272  if (!taskdata->td_flags.team_serial)
2273 #endif
2274  {
2275  kmp_flag_32 flag(RCAST(std::atomic<kmp_uint32> *, &(taskgroup->count)),
2276  0U);
2277  while (KMP_ATOMIC_LD_ACQ(&taskgroup->count) != 0) {
2278  flag.execute_tasks(thread, gtid, FALSE,
2279  &thread_finished USE_ITT_BUILD_ARG(itt_sync_obj),
2280  __kmp_task_stealing_constraint);
2281  }
2282  }
2283  taskdata->td_taskwait_thread = -taskdata->td_taskwait_thread; // end waiting
2284 
2285 #if OMPT_SUPPORT && OMPT_OPTIONAL
2286  if (UNLIKELY(ompt_enabled.ompt_callback_sync_region_wait)) {
2287  ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
2288  ompt_sync_region_taskgroup, ompt_scope_end, &(my_parallel_data),
2289  &(my_task_data), codeptr);
2290  }
2291 #endif
2292 
2293 #if USE_ITT_BUILD
2294  if (itt_sync_obj != NULL)
2295  __kmp_itt_taskwait_finished(gtid, itt_sync_obj);
2296 #endif /* USE_ITT_BUILD */
2297  }
2298  KMP_DEBUG_ASSERT(taskgroup->count == 0);
2299 
2300 #if OMP_50_ENABLED
2301  if (taskgroup->reduce_data != NULL) // need to reduce?
2302  __kmp_task_reduction_fini(thread, taskgroup);
2303 #endif
2304  // Restore parent taskgroup for the current task
2305  taskdata->td_taskgroup = taskgroup->parent;
2306  __kmp_thread_free(thread, taskgroup);
2307 
2308  KA_TRACE(10, ("__kmpc_end_taskgroup(exit): T#%d task %p finished waiting\n",
2309  gtid, taskdata));
2310  ANNOTATE_HAPPENS_AFTER(taskdata);
2311 
2312 #if OMPT_SUPPORT && OMPT_OPTIONAL
2313  if (UNLIKELY(ompt_enabled.ompt_callback_sync_region)) {
2314  ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
2315  ompt_sync_region_taskgroup, ompt_scope_end, &(my_parallel_data),
2316  &(my_task_data), codeptr);
2317  }
2318 #endif
2319 }
2320 #endif
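
// Illustrative sketch (assumed lowering): a "#pragma omp taskgroup" region
// brackets its body with the two entry points above; every task created
// inside increments the group's count, and the end call waits for the count
// to drain back to zero:
//
//   __kmpc_taskgroup(&loc, gtid);
//   // ... body: __kmpc_omp_task_alloc() / __kmpc_omp_task() calls ...
//   __kmpc_end_taskgroup(&loc, gtid); // returns once taskgroup->count == 0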
2321 
2322 // __kmp_remove_my_task: remove a task from my own deque
2323 static kmp_task_t *__kmp_remove_my_task(kmp_info_t *thread, kmp_int32 gtid,
2324  kmp_task_team_t *task_team,
2325  kmp_int32 is_constrained) {
2326  kmp_task_t *task;
2327  kmp_taskdata_t *taskdata;
2328  kmp_thread_data_t *thread_data;
2329  kmp_uint32 tail;
2330 
2331  KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
2332  KMP_DEBUG_ASSERT(task_team->tt.tt_threads_data !=
2333  NULL); // Caller should check this condition
2334 
2335  thread_data = &task_team->tt.tt_threads_data[__kmp_tid_from_gtid(gtid)];
2336 
2337  KA_TRACE(10, ("__kmp_remove_my_task(enter): T#%d ntasks=%d head=%u tail=%u\n",
2338  gtid, thread_data->td.td_deque_ntasks,
2339  thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
2340 
2341  if (TCR_4(thread_data->td.td_deque_ntasks) == 0) {
2342  KA_TRACE(10,
2343  ("__kmp_remove_my_task(exit #1): T#%d No tasks to remove: "
2344  "ntasks=%d head=%u tail=%u\n",
2345  gtid, thread_data->td.td_deque_ntasks,
2346  thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
2347  return NULL;
2348  }
2349 
2350  __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
2351 
2352  if (TCR_4(thread_data->td.td_deque_ntasks) == 0) {
2353  __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
2354  KA_TRACE(10,
2355  ("__kmp_remove_my_task(exit #2): T#%d No tasks to remove: "
2356  "ntasks=%d head=%u tail=%u\n",
2357  gtid, thread_data->td.td_deque_ntasks,
2358  thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
2359  return NULL;
2360  }
2361 
2362  tail = (thread_data->td.td_deque_tail - 1) &
2363  TASK_DEQUE_MASK(thread_data->td); // Wrap index.
2364  taskdata = thread_data->td.td_deque[tail];
2365 
2366  if (!__kmp_task_is_allowed(gtid, is_constrained, taskdata,
2367  thread->th.th_current_task)) {
2368  // The task scheduling constraint (TSC) forbids taking the tail task
2369  __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
2370  KA_TRACE(10,
2371  ("__kmp_remove_my_task(exit #3): T#%d TSC blocks tail task: "
2372  "ntasks=%d head=%u tail=%u\n",
2373  gtid, thread_data->td.td_deque_ntasks,
2374  thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
2375  return NULL;
2376  }
2377 
2378  thread_data->td.td_deque_tail = tail;
2379  TCW_4(thread_data->td.td_deque_ntasks, thread_data->td.td_deque_ntasks - 1);
2380 
2381  __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
2382 
2383  KA_TRACE(10, ("__kmp_remove_my_task(exit #4): T#%d task %p removed: "
2384  "ntasks=%d head=%u tail=%u\n",
2385  gtid, taskdata, thread_data->td.td_deque_ntasks,
2386  thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
2387 
2388  task = KMP_TASKDATA_TO_TASK(taskdata);
2389  return task;
2390 }
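
// Worked example (illustration only) of the tail arithmetic above: the mask
// is the deque size minus one and the size is kept a power of two, so the
// decrement wraps cleanly. With a deque of 256 slots and td_deque_tail == 0:
//
//   tail = (0 - 1) & 255;       // == 255, the last slot of the ring buffer
//   taskdata = td_deque[255];   // pop the most recently pushed task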
2391 
2392 // __kmp_steal_task: remove a task from another thread's deque
2393 // Assume that calling thread has already checked existence of
2394 // task_team thread_data before calling this routine.
2395 static kmp_task_t *__kmp_steal_task(kmp_info_t *victim_thr, kmp_int32 gtid,
2396  kmp_task_team_t *task_team,
2397  std::atomic<kmp_int32> *unfinished_threads,
2398  int *thread_finished,
2399  kmp_int32 is_constrained) {
2400  kmp_task_t *task;
2401  kmp_taskdata_t *taskdata;
2402  kmp_taskdata_t *current;
2403  kmp_thread_data_t *victim_td, *threads_data;
2404  kmp_int32 target;
2405  kmp_int32 victim_tid;
2406 
2407  KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
2408 
2409  threads_data = task_team->tt.tt_threads_data;
2410  KMP_DEBUG_ASSERT(threads_data != NULL); // Caller should check this condition
2411 
2412  victim_tid = victim_thr->th.th_info.ds.ds_tid;
2413  victim_td = &threads_data[victim_tid];
2414 
2415  KA_TRACE(10, ("__kmp_steal_task(enter): T#%d try to steal from T#%d: "
2416  "task_team=%p ntasks=%d head=%u tail=%u\n",
2417  gtid, __kmp_gtid_from_thread(victim_thr), task_team,
2418  victim_td->td.td_deque_ntasks, victim_td->td.td_deque_head,
2419  victim_td->td.td_deque_tail));
2420 
2421  if (TCR_4(victim_td->td.td_deque_ntasks) == 0) {
2422  KA_TRACE(10, ("__kmp_steal_task(exit #1): T#%d could not steal from T#%d: "
2423  "task_team=%p ntasks=%d head=%u tail=%u\n",
2424  gtid, __kmp_gtid_from_thread(victim_thr), task_team,
2425  victim_td->td.td_deque_ntasks, victim_td->td.td_deque_head,
2426  victim_td->td.td_deque_tail));
2427  return NULL;
2428  }
2429 
2430  __kmp_acquire_bootstrap_lock(&victim_td->td.td_deque_lock);
2431 
2432  int ntasks = TCR_4(victim_td->td.td_deque_ntasks);
2433  // Check again after we acquire the lock
2434  if (ntasks == 0) {
2435  __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
2436  KA_TRACE(10, ("__kmp_steal_task(exit #2): T#%d could not steal from T#%d: "
2437  "task_team=%p ntasks=%d head=%u tail=%u\n",
2438  gtid, __kmp_gtid_from_thread(victim_thr), task_team, ntasks,
2439  victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
2440  return NULL;
2441  }
2442 
2443  KMP_DEBUG_ASSERT(victim_td->td.td_deque != NULL);
2444  current = __kmp_threads[gtid]->th.th_current_task;
2445  taskdata = victim_td->td.td_deque[victim_td->td.td_deque_head];
2446  if (__kmp_task_is_allowed(gtid, is_constrained, taskdata, current)) {
2447  // Bump head pointer and Wrap.
2448  victim_td->td.td_deque_head =
2449  (victim_td->td.td_deque_head + 1) & TASK_DEQUE_MASK(victim_td->td);
2450  } else {
2451  if (!task_team->tt.tt_untied_task_encountered) {
2452  // The task scheduling constraint (TSC) forbids stealing the victim task
2453  __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
2454  KA_TRACE(10, ("__kmp_steal_task(exit #3): T#%d could not steal from "
2455  "T#%d: task_team=%p ntasks=%d head=%u tail=%u\n",
2456  gtid, __kmp_gtid_from_thread(victim_thr), task_team, ntasks,
2457  victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
2458  return NULL;
2459  }
2460  int i;
2461  // walk through victim's deque trying to steal any task
2462  target = victim_td->td.td_deque_head;
2463  taskdata = NULL;
2464  for (i = 1; i < ntasks; ++i) {
2465  target = (target + 1) & TASK_DEQUE_MASK(victim_td->td);
2466  taskdata = victim_td->td.td_deque[target];
2467  if (__kmp_task_is_allowed(gtid, is_constrained, taskdata, current)) {
2468  break; // found victim task
2469  } else {
2470  taskdata = NULL;
2471  }
2472  }
2473  if (taskdata == NULL) {
2474  // No appropriate candidate to steal found
2475  __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
2476  KA_TRACE(10, ("__kmp_steal_task(exit #4): T#%d could not steal from "
2477  "T#%d: task_team=%p ntasks=%d head=%u tail=%u\n",
2478  gtid, __kmp_gtid_from_thread(victim_thr), task_team, ntasks,
2479  victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
2480  return NULL;
2481  }
2482  int prev = target;
2483  for (i = i + 1; i < ntasks; ++i) {
2484  // shift remaining tasks in the deque left by 1
2485  target = (target + 1) & TASK_DEQUE_MASK(victim_td->td);
2486  victim_td->td.td_deque[prev] = victim_td->td.td_deque[target];
2487  prev = target;
2488  }
2489  KMP_DEBUG_ASSERT(
2490  victim_td->td.td_deque_tail ==
2491  (kmp_uint32)((target + 1) & TASK_DEQUE_MASK(victim_td->td)));
2492  victim_td->td.td_deque_tail = target; // tail -= 1 (wrapped)
2493  }
2494  if (*thread_finished) {
2495  // We need to un-mark this victim as a finished victim. This must be done
2496  // before releasing the lock, or else other threads (starting with the
2497  // master victim) might be prematurely released from the barrier!!!
2498  kmp_int32 count;
2499 
2500  count = KMP_ATOMIC_INC(unfinished_threads);
2501 
2502  KA_TRACE(
2503  20,
2504  ("__kmp_steal_task: T#%d inc unfinished_threads to %d: task_team=%p\n",
2505  gtid, count + 1, task_team));
2506 
2507  *thread_finished = FALSE;
2508  }
2509  TCW_4(victim_td->td.td_deque_ntasks, ntasks - 1);
2510 
2511  __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
2512 
2513  KMP_COUNT_BLOCK(TASK_stolen);
2514  KA_TRACE(10,
2515  ("__kmp_steal_task(exit #5): T#%d stole task %p from T#%d: "
2516  "task_team=%p ntasks=%d head=%u tail=%u\n",
2517  gtid, taskdata, __kmp_gtid_from_thread(victim_thr), task_team,
2518  ntasks, victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
2519 
2520  task = KMP_TASKDATA_TO_TASK(taskdata);
2521  return task;
2522 }
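
// Design note with a small sketch (not additional runtime code): the owner
// and thieves work opposite ends of the same ring buffer, so the owner pops
// in LIFO order (better locality for freshly spawned tasks) while thieves
// steal in FIFO order (older tasks, which tend to spawn more work):
//
//   owner pop  : tail = (tail - 1) & mask;  task = deque[tail];
//   thief steal: task = deque[head];        head = (head + 1) & mask;
//
// Unlike lock-free work-stealing deques, both paths here run while holding
// td_deque_lock.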
2523 
2524 // __kmp_execute_tasks_template: Choose and execute tasks until either the
2525 // condition is satisfied (return true) or there are none left (return false).
2526 //
2527 // final_spin is TRUE if this is the spin at the release barrier.
2528 // thread_finished indicates whether the thread is finished executing all
2529 // the tasks it has on its deque, and is at the release barrier.
2530 // spinner is the location on which to spin.
2531 // spinner == NULL means only execute a single task and return.
2532 // checker is the value to check to terminate the spin.
2533 template <class C>
2534 static inline int __kmp_execute_tasks_template(
2535  kmp_info_t *thread, kmp_int32 gtid, C *flag, int final_spin,
2536  int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
2537  kmp_int32 is_constrained) {
2538  kmp_task_team_t *task_team = thread->th.th_task_team;
2539  kmp_thread_data_t *threads_data;
2540  kmp_task_t *task;
2541  kmp_info_t *other_thread;
2542  kmp_taskdata_t *current_task = thread->th.th_current_task;
2543  std::atomic<kmp_int32> *unfinished_threads;
2544  kmp_int32 nthreads, victim_tid = -2, use_own_tasks = 1, new_victim = 0,
2545  tid = thread->th.th_info.ds.ds_tid;
2546 
2547  KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
2548  KMP_DEBUG_ASSERT(thread == __kmp_threads[gtid]);
2549 
2550  if (task_team == NULL || current_task == NULL)
2551  return FALSE;
2552 
2553  KA_TRACE(15, ("__kmp_execute_tasks_template(enter): T#%d final_spin=%d "
2554  "*thread_finished=%d\n",
2555  gtid, final_spin, *thread_finished));
2556 
2557  thread->th.th_reap_state = KMP_NOT_SAFE_TO_REAP;
2558  threads_data = (kmp_thread_data_t *)TCR_PTR(task_team->tt.tt_threads_data);
2559  KMP_DEBUG_ASSERT(threads_data != NULL);
2560 
2561  nthreads = task_team->tt.tt_nproc;
2562  unfinished_threads = &(task_team->tt.tt_unfinished_threads);
2563 #if OMP_45_ENABLED
2564  KMP_DEBUG_ASSERT(nthreads > 1 || task_team->tt.tt_found_proxy_tasks);
2565 #else
2566  KMP_DEBUG_ASSERT(nthreads > 1);
2567 #endif
2568  KMP_DEBUG_ASSERT(*unfinished_threads >= 0);
2569 
2570  while (1) { // Outer loop keeps trying to find tasks in case of single thread
2571  // getting tasks from target constructs
2572  while (1) { // Inner loop to find a task and execute it
2573  task = NULL;
2574  if (use_own_tasks) { // check on own queue first
2575  task = __kmp_remove_my_task(thread, gtid, task_team, is_constrained);
2576  }
2577  if ((task == NULL) && (nthreads > 1)) { // Steal a task
2578  int asleep = 1;
2579  use_own_tasks = 0;
2580  // Try to steal from the last place I stole from successfully.
2581  if (victim_tid == -2) { // haven't stolen anything yet
2582  victim_tid = threads_data[tid].td.td_deque_last_stolen;
2583  if (victim_tid !=
2584  -1) // if we have a last stolen from victim, get the thread
2585  other_thread = threads_data[victim_tid].td.td_thr;
2586  }
2587  if (victim_tid != -1) { // found last victim
2588  asleep = 0;
2589  } else if (!new_victim) { // no recent steals and we haven't already
2590  // used a new victim; select a random thread
2591  do { // Find a different thread to steal work from.
2592  // Pick a random thread. Initial plan was to cycle through all the
2593  // threads, and only return if we tried to steal from every thread,
2594  // and failed. Arch says that's not such a great idea.
2595  victim_tid = __kmp_get_random(thread) % (nthreads - 1);
2596  if (victim_tid >= tid) {
2597  ++victim_tid; // Adjusts random distribution to exclude self
2598  }
2599  // Found a potential victim
2600  other_thread = threads_data[victim_tid].td.td_thr;
2601  // There is a slight chance that __kmp_enable_tasking() did not wake
2602  // up all threads waiting at the barrier. If victim is sleeping,
2603  // then wake it up. Since we were going to pay the cache miss
2604  // penalty for referencing another thread's kmp_info_t struct
2605  // anyway,
2606  // the check shouldn't cost too much performance at this point. In
2607  // extra barrier mode, threads do not sleep at the separate tasking
2608  // barrier, so this isn't a problem.
2609  asleep = 0;
2610  if ((__kmp_tasking_mode == tskm_task_teams) &&
2611  (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) &&
2612  (TCR_PTR(CCAST(void *, other_thread->th.th_sleep_loc)) !=
2613  NULL)) {
2614  asleep = 1;
2615  __kmp_null_resume_wrapper(__kmp_gtid_from_thread(other_thread),
2616  other_thread->th.th_sleep_loc);
2617  // A sleeping thread should not have any tasks on its queue.
2618  // There is a slight possibility that it resumes, steals a task
2619  // from another thread, which spawns more tasks, all in the time
2620  // that it takes this thread to check => don't write an assertion
2621  // that the victim's queue is empty. Try stealing from a
2622  // different thread.
2623  }
2624  } while (asleep);
2625  }
2626 
2627  if (!asleep) {
2628  // We have a victim to try to steal from
2629  task = __kmp_steal_task(other_thread, gtid, task_team,
2630  unfinished_threads, thread_finished,
2631  is_constrained);
2632  }
2633  if (task != NULL) { // set last stolen to victim
2634  if (threads_data[tid].td.td_deque_last_stolen != victim_tid) {
2635  threads_data[tid].td.td_deque_last_stolen = victim_tid;
2636  // The pre-refactored code did not try more than 1 successful new
2637  // victim, unless the last one generated more local tasks;
2638  // new_victim keeps track of this
2639  new_victim = 1;
2640  }
2641  } else { // No tasks found; unset last_stolen
2642  KMP_CHECK_UPDATE(threads_data[tid].td.td_deque_last_stolen, -1);
2643  victim_tid = -2; // no successful victim found
2644  }
2645  }
2646 
2647  if (task == NULL) // break out of tasking loop
2648  break;
2649 
2650 // Found a task; execute it
2651 #if USE_ITT_BUILD && USE_ITT_NOTIFY
2652  if (__itt_sync_create_ptr || KMP_ITT_DEBUG) {
2653  if (itt_sync_obj == NULL) { // we are at fork barrier where we could not
2654  // get the object reliably
2655  itt_sync_obj = __kmp_itt_barrier_object(gtid, bs_forkjoin_barrier);
2656  }
2657  __kmp_itt_task_starting(itt_sync_obj);
2658  }
2659 #endif /* USE_ITT_BUILD && USE_ITT_NOTIFY */
2660  __kmp_invoke_task(gtid, task, current_task);
2661 #if USE_ITT_BUILD
2662  if (itt_sync_obj != NULL)
2663  __kmp_itt_task_finished(itt_sync_obj);
2664 #endif /* USE_ITT_BUILD */
2665  // If this thread is only partway through the barrier and the condition is
2666  // met, then return now, so that the barrier gather/release pattern can
2667  // proceed. If this thread is in the last spin loop in the barrier,
2668  // waiting to be released, we know that the termination condition will not
2669  // be satisfied, so don't waste any cycles checking it.
2670  if (flag == NULL || (!final_spin && flag->done_check())) {
2671  KA_TRACE(
2672  15,
2673  ("__kmp_execute_tasks_template: T#%d spin condition satisfied\n",
2674  gtid));
2675  return TRUE;
2676  }
2677  if (thread->th.th_task_team == NULL) {
2678  break;
2679  }
2680  // Yield before executing next task
2681  KMP_YIELD(__kmp_library == library_throughput);
2682  // If execution of a stolen task results in more tasks being placed on our
2683  // run queue, reset use_own_tasks
2684  if (!use_own_tasks && TCR_4(threads_data[tid].td.td_deque_ntasks) != 0) {
2685  KA_TRACE(20, ("__kmp_execute_tasks_template: T#%d stolen task spawned "
2686  "other tasks, restart\n",
2687  gtid));
2688  use_own_tasks = 1;
2689  new_victim = 0;
2690  }
2691  }
2692 
2693 // The task source has been exhausted. If in final spin loop of barrier, check
2694 // if termination condition is satisfied.
2695 #if OMP_45_ENABLED
2696  // The work queue may be empty but there might be proxy tasks still
2697  // executing
2698  if (final_spin &&
2699  KMP_ATOMIC_LD_ACQ(&current_task->td_incomplete_child_tasks) == 0)
2700 #else
2701  if (final_spin)
2702 #endif
2703  {
2704  // First, decrement the #unfinished threads, if that has not already been
2705  // done. This decrement might be to the spin location, and result in the
2706  // termination condition being satisfied.
2707  if (!*thread_finished) {
2708  kmp_int32 count;
2709 
2710  count = KMP_ATOMIC_DEC(unfinished_threads) - 1;
2711  KA_TRACE(20, ("__kmp_execute_tasks_template: T#%d dec "
2712  "unfinished_threads to %d task_team=%p\n",
2713  gtid, count, task_team));
2714  *thread_finished = TRUE;
2715  }
2716 
2717  // It is now unsafe to reference thread->th.th_team !!!
2718  // Decrementing task_team->tt.tt_unfinished_threads can allow the master
2719  // thread to pass through the barrier, where it might reset each thread's
2720  // th.th_team field for the next parallel region. If we can steal more
2721  // work, we know that this has not happened yet.
2722  if (flag != NULL && flag->done_check()) {
2723  KA_TRACE(
2724  15,
2725  ("__kmp_execute_tasks_template: T#%d spin condition satisfied\n",
2726  gtid));
2727  return TRUE;
2728  }
2729  }
2730 
2731  // If this thread's task team is NULL, master has recognized that there are
2732  // no more tasks; bail out
2733  if (thread->th.th_task_team == NULL) {
2734  KA_TRACE(15,
2735  ("__kmp_execute_tasks_template: T#%d no more tasks\n", gtid));
2736  return FALSE;
2737  }
2738 
2739 #if OMP_45_ENABLED
2740  // We could be getting tasks from target constructs; if this is the only
2741  // thread, keep trying to execute tasks from own queue
2742  if (nthreads == 1)
2743  use_own_tasks = 1;
2744  else
2745 #endif
2746  {
2747  KA_TRACE(15,
2748  ("__kmp_execute_tasks_template: T#%d can't find work\n", gtid));
2749  return FALSE;
2750  }
2751  }
2752 }
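
// Worked example (illustration only) of the victim selection above: drawing
// from nthreads-1 values and bumping any result >= tid spreads the choice
// uniformly over every thread except the caller. With nthreads == 4 and
// tid == 2:
//
//   raw 0 -> victim 0
//   raw 1 -> victim 1
//   raw 2 -> victim 3   // raw >= tid, so ++victim_tid skips self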
2753 
2754 int __kmp_execute_tasks_32(
2755  kmp_info_t *thread, kmp_int32 gtid, kmp_flag_32 *flag, int final_spin,
2756  int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
2757  kmp_int32 is_constrained) {
2758  return __kmp_execute_tasks_template(
2759  thread, gtid, flag, final_spin,
2760  thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
2761 }
2762 
2763 int __kmp_execute_tasks_64(
2764  kmp_info_t *thread, kmp_int32 gtid, kmp_flag_64 *flag, int final_spin,
2765  int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
2766  kmp_int32 is_constrained) {
2767  return __kmp_execute_tasks_template(
2768  thread, gtid, flag, final_spin,
2769  thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
2770 }
2771 
2772 int __kmp_execute_tasks_oncore(
2773  kmp_info_t *thread, kmp_int32 gtid, kmp_flag_oncore *flag, int final_spin,
2774  int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
2775  kmp_int32 is_constrained) {
2776  return __kmp_execute_tasks_template(
2777  thread, gtid, flag, final_spin,
2778  thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
2779 }
2780 
2781 // __kmp_enable_tasking: Allocate task team and resume threads sleeping at the
2782 // next barrier so they can assist in executing enqueued tasks.
2783 // First thread in allocates the task team atomically.
2784 static void __kmp_enable_tasking(kmp_task_team_t *task_team,
2785  kmp_info_t *this_thr) {
2786  kmp_thread_data_t *threads_data;
2787  int nthreads, i, is_init_thread;
2788 
2789  KA_TRACE(10, ("__kmp_enable_tasking(enter): T#%d\n",
2790  __kmp_gtid_from_thread(this_thr)));
2791 
2792  KMP_DEBUG_ASSERT(task_team != NULL);
2793  KMP_DEBUG_ASSERT(this_thr->th.th_team != NULL);
2794 
2795  nthreads = task_team->tt.tt_nproc;
2796  KMP_DEBUG_ASSERT(nthreads > 0);
2797  KMP_DEBUG_ASSERT(nthreads == this_thr->th.th_team->t.t_nproc);
2798 
2799  // Allocate or increase the size of threads_data if necessary
2800  is_init_thread = __kmp_realloc_task_threads_data(this_thr, task_team);
2801 
2802  if (!is_init_thread) {
2803  // Some other thread already set up the array.
2804  KA_TRACE(
2805  20,
2806  ("__kmp_enable_tasking(exit): T#%d: threads array already set up.\n",
2807  __kmp_gtid_from_thread(this_thr)));
2808  return;
2809  }
2810  threads_data = (kmp_thread_data_t *)TCR_PTR(task_team->tt.tt_threads_data);
2811  KMP_DEBUG_ASSERT(threads_data != NULL);
2812 
2813  if ((__kmp_tasking_mode == tskm_task_teams) &&
2814  (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME)) {
2815  // Release any threads sleeping at the barrier, so that they can steal
2816  // tasks and execute them. In extra barrier mode, threads do not sleep
2817  // at the separate tasking barrier, so this isn't a problem.
2818  for (i = 0; i < nthreads; i++) {
2819  volatile void *sleep_loc;
2820  kmp_info_t *thread = threads_data[i].td.td_thr;
2821 
2822  if (i == this_thr->th.th_info.ds.ds_tid) {
2823  continue;
2824  }
2825  // Since we haven't locked the thread's suspend mutex lock at this
2826  // point, there is a small window where a thread might be putting
2827  // itself to sleep, but hasn't set the th_sleep_loc field yet.
2828  // To work around this, __kmp_execute_tasks_template() periodically checks
2829  // to see if other threads are sleeping (using the same random mechanism that
2830  // is used for task stealing) and awakens them if they are.
2831  if ((sleep_loc = TCR_PTR(CCAST(void *, thread->th.th_sleep_loc))) !=
2832  NULL) {
2833  KF_TRACE(50, ("__kmp_enable_tasking: T#%d waking up thread T#%d\n",
2834  __kmp_gtid_from_thread(this_thr),
2835  __kmp_gtid_from_thread(thread)));
2836  __kmp_null_resume_wrapper(__kmp_gtid_from_thread(thread), sleep_loc);
2837  } else {
2838  KF_TRACE(50, ("__kmp_enable_tasking: T#%d don't wake up thread T#%d\n",
2839  __kmp_gtid_from_thread(this_thr),
2840  __kmp_gtid_from_thread(thread)));
2841  }
2842  }
2843  }
2844 
2845  KA_TRACE(10, ("__kmp_enable_tasking(exit): T#%d\n",
2846  __kmp_gtid_from_thread(this_thr)));
2847 }
2848 
2849 /* // TODO: Check the comment consistency
2850  * Utility routines for "task teams". A task team (kmp_task_team_t) is kind of
2851  * like a shadow of the kmp_team_t data struct, with a different lifetime.
2852  * After a child thread checks into a barrier and calls __kmp_release() from
2853  * the particular variant of __kmp_<barrier_kind>_barrier_gather(), it can no
2854  * longer assume that the kmp_team_t structure is intact (at any moment, the
2855  * master thread may exit the barrier code and free the team data structure,
2856  * and return the threads to the thread pool).
2857  *
2858  * This does not work with the tasking code, as the thread is still
2859  * expected to participate in the execution of any tasks that may have been
2860  * spawned by a member of the team, and the thread still needs access to
2861  * each thread in the team, so that it can steal work from it.
2862  *
2863  * Enter the existence of the kmp_task_team_t struct. It employs a reference
2864  * counting mechanism, and is allocated by the master thread before calling
2865  * __kmp_<barrier_kind>_release, and then is released by the last thread to
2866  * exit __kmp_<barrier_kind>_release at the next barrier. I.e. the lifetimes
2867  * of the kmp_task_team_t structs for consecutive barriers can overlap
2868  * (and will, unless the master thread is the last thread to exit the barrier
2869  * release phase, which is not typical).
2870  *
2871  * The existence of such a struct is useful outside the context of tasking,
2872  * but for now, I'm trying to keep it specific to the OMP_30_ENABLED macro,
2873  * so that any performance differences show up when comparing the 2.5 vs. 3.0
2874  * libraries.
2875  *
2876  * We currently use the existence of the threads array as an indicator that
2877  * tasks were spawned since the last barrier. If the structure is to be
2878  * useful outside the context of tasking, then this will have to change, but
2879  * not setting the field minimizes the performance impact of tasking on
2880  * barriers, when no explicit tasks were spawned (pushed, actually).
2881  */
2882 
2883 static kmp_task_team_t *__kmp_free_task_teams =
2884  NULL; // Free list for task_team data structures
2885 // Lock for task team data structures
2886 kmp_bootstrap_lock_t __kmp_task_team_lock =
2887  KMP_BOOTSTRAP_LOCK_INITIALIZER(__kmp_task_team_lock);
2888 
2889 // __kmp_alloc_task_deque:
2890 // Allocates a task deque for a particular thread, and initializes the necessary
2891 // data structures relating to the deque. This only happens once per thread
2892 // per task team since task teams are recycled. No lock is needed during
2893 // allocation since each thread allocates its own deque.
2894 static void __kmp_alloc_task_deque(kmp_info_t *thread,
2895  kmp_thread_data_t *thread_data) {
2896  __kmp_init_bootstrap_lock(&thread_data->td.td_deque_lock);
2897  KMP_DEBUG_ASSERT(thread_data->td.td_deque == NULL);
2898 
2899  // Initialize last stolen task field to "none"
2900  thread_data->td.td_deque_last_stolen = -1;
2901 
2902  KMP_DEBUG_ASSERT(TCR_4(thread_data->td.td_deque_ntasks) == 0);
2903  KMP_DEBUG_ASSERT(thread_data->td.td_deque_head == 0);
2904  KMP_DEBUG_ASSERT(thread_data->td.td_deque_tail == 0);
2905 
2906  KE_TRACE(
2907  10,
2908  ("__kmp_alloc_task_deque: T#%d allocating deque[%d] for thread_data %p\n",
2909  __kmp_gtid_from_thread(thread), INITIAL_TASK_DEQUE_SIZE, thread_data));
2910  // Allocate space for task deque, and zero the deque
2911  // Cannot use __kmp_thread_calloc() because threads not around for
2912  // kmp_reap_task_team( ).
2913  thread_data->td.td_deque = (kmp_taskdata_t **)__kmp_allocate(
2914  INITIAL_TASK_DEQUE_SIZE * sizeof(kmp_taskdata_t *));
2915  thread_data->td.td_deque_size = INITIAL_TASK_DEQUE_SIZE;
2916 }
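
// A minimal sketch of the ring-buffer layout used for the deque (hypothetical
// names, plain C++): with a power-of-two capacity, the head/tail indices can
// wrap with a bit mask instead of a modulo, which is what the TASK_DEQUE_MASK
// arithmetic in __kmp_give_task below relies on.
struct example_deque {
  void **slots;        // storage for task pointers
  unsigned capacity;   // assumed to be a power of two
  unsigned head, tail; // head/tail indices into the ring
  unsigned ntasks;     // current number of queued tasks
};
static bool example_push_tail(example_deque *d, void *task) {
  if (d->ntasks >= d->capacity)
    return false; // full; the runtime would grow the deque instead
  d->slots[d->tail] = task;
  d->tail = (d->tail + 1) & (d->capacity - 1); // wrap the index with a mask
  d->ntasks++;
  return true;
}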
2917 
2918 // __kmp_free_task_deque:
2919 // Deallocates a task deque for a particular thread. Happens at library
2920 // deallocation, so there is no need to reset all thread data fields.
2921 static void __kmp_free_task_deque(kmp_thread_data_t *thread_data) {
2922  if (thread_data->td.td_deque != NULL) {
2923  __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
2924  TCW_4(thread_data->td.td_deque_ntasks, 0);
2925  __kmp_free(thread_data->td.td_deque);
2926  thread_data->td.td_deque = NULL;
2927  __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
2928  }
2929 
2930 #ifdef BUILD_TIED_TASK_STACK
2931  // GEH: Figure out what to do here for td_susp_tied_tasks
2932  if (thread_data->td.td_susp_tied_tasks.ts_entries != TASK_STACK_EMPTY) {
2933  __kmp_free_task_stack(__kmp_thread_from_gtid(gtid), thread_data);
2934  }
2935 #endif // BUILD_TIED_TASK_STACK
2936 }
2937 
2938 // __kmp_realloc_task_threads_data:
2939 // Allocates a threads_data array for a task team, either by allocating an
2940 // initial array or enlarging an existing array. Only the first thread to get
2941 // the lock allocates or enlarges the array and re-initializes the array elements.
2942 // That thread returns "TRUE", the rest return "FALSE".
2943 // Assumes that the new array size is given by task_team -> tt.tt_nproc.
2944 // The current size is given by task_team -> tt.tt_max_threads.
2945 static int __kmp_realloc_task_threads_data(kmp_info_t *thread,
2946  kmp_task_team_t *task_team) {
2947  kmp_thread_data_t **threads_data_p;
2948  kmp_int32 nthreads, maxthreads;
2949  int is_init_thread = FALSE;
2950 
2951  if (TCR_4(task_team->tt.tt_found_tasks)) {
2952  // Already reallocated and initialized.
2953  return FALSE;
2954  }
2955 
2956  threads_data_p = &task_team->tt.tt_threads_data;
2957  nthreads = task_team->tt.tt_nproc;
2958  maxthreads = task_team->tt.tt_max_threads;
2959 
2960  // All threads must lock when they encounter the first task of the implicit
2961  // task region to make sure threads_data fields are (re)initialized before
2962  // used.
2963  __kmp_acquire_bootstrap_lock(&task_team->tt.tt_threads_lock);
2964 
2965  if (!TCR_4(task_team->tt.tt_found_tasks)) {
2966  // first thread to enable tasking
2967  kmp_team_t *team = thread->th.th_team;
2968  int i;
2969 
2970  is_init_thread = TRUE;
2971  if (maxthreads < nthreads) {
2972 
2973  if (*threads_data_p != NULL) {
2974  kmp_thread_data_t *old_data = *threads_data_p;
2975  kmp_thread_data_t *new_data = NULL;
2976 
2977  KE_TRACE(
2978  10,
2979  ("__kmp_realloc_task_threads_data: T#%d reallocating "
2980  "threads data for task_team %p, new_size = %d, old_size = %d\n",
2981  __kmp_gtid_from_thread(thread), task_team, nthreads, maxthreads));
2982  // Reallocate threads_data to have more elements than current array
2983  // Cannot use __kmp_thread_realloc() because threads not around for
2984  // kmp_reap_task_team( ). Note all new array entries are initialized
2985  // to zero by __kmp_allocate().
2986  new_data = (kmp_thread_data_t *)__kmp_allocate(
2987  nthreads * sizeof(kmp_thread_data_t));
2988  // copy old data to new data
2989  KMP_MEMCPY_S((void *)new_data, nthreads * sizeof(kmp_thread_data_t),
2990  (void *)old_data, maxthreads * sizeof(kmp_thread_data_t));
2991 
2992 #ifdef BUILD_TIED_TASK_STACK
2993  // GEH: Figure out if this is the right thing to do
2994  for (i = maxthreads; i < nthreads; i++) {
2995  kmp_thread_data_t *thread_data = &(*threads_data_p)[i];
2996  __kmp_init_task_stack(__kmp_gtid_from_thread(thread), thread_data);
2997  }
2998 #endif // BUILD_TIED_TASK_STACK
2999  // Install the new data and free the old data
3000  (*threads_data_p) = new_data;
3001  __kmp_free(old_data);
3002  } else {
3003  KE_TRACE(10, ("__kmp_realloc_task_threads_data: T#%d allocating "
3004  "threads data for task_team %p, size = %d\n",
3005  __kmp_gtid_from_thread(thread), task_team, nthreads));
3006  // Make the initial allocate for threads_data array, and zero entries
3007  // Cannot use __kmp_thread_calloc() because threads not around for
3008  // kmp_reap_task_team( ).
3009  ANNOTATE_IGNORE_WRITES_BEGIN();
3010  *threads_data_p = (kmp_thread_data_t *)__kmp_allocate(
3011  nthreads * sizeof(kmp_thread_data_t));
3012  ANNOTATE_IGNORE_WRITES_END();
3013 #ifdef BUILD_TIED_TASK_STACK
3014  // GEH: Figure out if this is the right thing to do
3015  for (i = 0; i < nthreads; i++) {
3016  kmp_thread_data_t *thread_data = &(*threads_data_p)[i];
3017  __kmp_init_task_stack(__kmp_gtid_from_thread(thread), thread_data);
3018  }
3019 #endif // BUILD_TIED_TASK_STACK
3020  }
3021  task_team->tt.tt_max_threads = nthreads;
3022  } else {
3023  // If array has (more than) enough elements, go ahead and use it
3024  KMP_DEBUG_ASSERT(*threads_data_p != NULL);
3025  }
3026 
3027  // initialize threads_data pointers back to thread_info structures
3028  for (i = 0; i < nthreads; i++) {
3029  kmp_thread_data_t *thread_data = &(*threads_data_p)[i];
3030  thread_data->td.td_thr = team->t.t_threads[i];
3031 
3032  if (thread_data->td.td_deque_last_stolen >= nthreads) {
3033  // The last stolen field survives across teams / barriers, and the number
3034  // of threads may have changed. It's possible (likely?) that a new
3035  // parallel region will exhibit the same behavior as the previous region.
3036  thread_data->td.td_deque_last_stolen = -1;
3037  }
3038  }
3039 
3040  KMP_MB();
3041  TCW_SYNC_4(task_team->tt.tt_found_tasks, TRUE);
3042  }
3043 
3044  __kmp_release_bootstrap_lock(&task_team->tt.tt_threads_lock);
3045  return is_init_thread;
3046 }
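
// A minimal sketch of the check / lock / re-check pattern used above
// (hypothetical names, plain C++ standard primitives rather than the
// runtime's TCR/TCW macros and bootstrap lock): the flag is read once
// without the lock as a fast path and re-read under the lock, so only the
// first thread performs the (re)initialization, mirroring is_init_thread.
#include <atomic>
#include <mutex>
static std::atomic<bool> example_found_tasks{false};
static std::mutex example_threads_lock;
static bool example_init_threads_data(void (*do_init)()) {
  if (example_found_tasks.load())
    return false; // already initialized: fast path, no lock taken
  std::lock_guard<std::mutex> guard(example_threads_lock);
  if (example_found_tasks.load())
    return false; // another thread won the race while we waited
  do_init();      // only the first thread to get the lock does the work
  example_found_tasks.store(true);
  return true;    // mirrors returning TRUE from the routine above
}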
3047 
3048 // __kmp_free_task_threads_data:
3049 // Deallocates a threads_data array for a task team, including any attached
3050 // tasking deques. Only occurs at library shutdown.
3051 static void __kmp_free_task_threads_data(kmp_task_team_t *task_team) {
3052  __kmp_acquire_bootstrap_lock(&task_team->tt.tt_threads_lock);
3053  if (task_team->tt.tt_threads_data != NULL) {
3054  int i;
3055  for (i = 0; i < task_team->tt.tt_max_threads; i++) {
3056  __kmp_free_task_deque(&task_team->tt.tt_threads_data[i]);
3057  }
3058  __kmp_free(task_team->tt.tt_threads_data);
3059  task_team->tt.tt_threads_data = NULL;
3060  }
3061  __kmp_release_bootstrap_lock(&task_team->tt.tt_threads_lock);
3062 }
3063 
3064 // __kmp_allocate_task_team:
3065 // Allocates a task team associated with a specific team, taking it from
3066 // the global task team free list if possible. Also initializes data
3067 // structures.
3068 static kmp_task_team_t *__kmp_allocate_task_team(kmp_info_t *thread,
3069  kmp_team_t *team) {
3070  kmp_task_team_t *task_team = NULL;
3071  int nthreads;
3072 
3073  KA_TRACE(20, ("__kmp_allocate_task_team: T#%d entering; team = %p\n",
3074  (thread ? __kmp_gtid_from_thread(thread) : -1), team));
3075 
3076  if (TCR_PTR(__kmp_free_task_teams) != NULL) {
3077  // Take a task team from the task team pool
3078  __kmp_acquire_bootstrap_lock(&__kmp_task_team_lock);
3079  if (__kmp_free_task_teams != NULL) {
3080  task_team = __kmp_free_task_teams;
3081  TCW_PTR(__kmp_free_task_teams, task_team->tt.tt_next);
3082  task_team->tt.tt_next = NULL;
3083  }
3084  __kmp_release_bootstrap_lock(&__kmp_task_team_lock);
3085  }
3086 
3087  if (task_team == NULL) {
3088  KE_TRACE(10, ("__kmp_allocate_task_team: T#%d allocating "
3089  "task team for team %p\n",
3090  __kmp_gtid_from_thread(thread), team));
3091  // Allocate a new task team if one is not available.
3092  // Cannot use __kmp_thread_malloc() because threads not around for
3093  // kmp_reap_task_team( ).
3094  task_team = (kmp_task_team_t *)__kmp_allocate(sizeof(kmp_task_team_t));
3095  __kmp_init_bootstrap_lock(&task_team->tt.tt_threads_lock);
3096  // AC: __kmp_allocate zeroes returned memory
3097  // task_team -> tt.tt_threads_data = NULL;
3098  // task_team -> tt.tt_max_threads = 0;
3099  // task_team -> tt.tt_next = NULL;
3100  }
3101 
3102  TCW_4(task_team->tt.tt_found_tasks, FALSE);
3103 #if OMP_45_ENABLED
3104  TCW_4(task_team->tt.tt_found_proxy_tasks, FALSE);
3105 #endif
3106  task_team->tt.tt_nproc = nthreads = team->t.t_nproc;
3107 
3108  KMP_ATOMIC_ST_REL(&task_team->tt.tt_unfinished_threads, nthreads);
3109  TCW_4(task_team->tt.tt_active, TRUE);
3110 
3111  KA_TRACE(20, ("__kmp_allocate_task_team: T#%d exiting; task_team = %p "
3112  "unfinished_threads init'd to %d\n",
3113  (thread ? __kmp_gtid_from_thread(thread) : -1), task_team,
3114  KMP_ATOMIC_LD_RLX(&task_team->tt.tt_unfinished_threads)));
3115  return task_team;
3116 }
3117 
3118 // __kmp_free_task_team:
3119 // Frees the task team associated with a specific thread, and adds it
3120 // to the global task team free list.
3121 void __kmp_free_task_team(kmp_info_t *thread, kmp_task_team_t *task_team) {
3122  KA_TRACE(20, ("__kmp_free_task_team: T#%d task_team = %p\n",
3123  thread ? __kmp_gtid_from_thread(thread) : -1, task_team));
3124 
3125  // Put task team back on free list
3126  __kmp_acquire_bootstrap_lock(&__kmp_task_team_lock);
3127 
3128  KMP_DEBUG_ASSERT(task_team->tt.tt_next == NULL);
3129  task_team->tt.tt_next = __kmp_free_task_teams;
3130  TCW_PTR(__kmp_free_task_teams, task_team);
3131 
3132  __kmp_release_bootstrap_lock(&__kmp_task_team_lock);
3133 }
3134 
3135 // __kmp_reap_task_teams:
3136 // Free all the task teams on the task team free list.
3137 // Should only be done during library shutdown.
3138 // Cannot do anything that needs a thread structure or gtid since they are
3139 // already gone.
3140 void __kmp_reap_task_teams(void) {
3141  kmp_task_team_t *task_team;
3142 
3143  if (TCR_PTR(__kmp_free_task_teams) != NULL) {
3144  // Free all task_teams on the free list
3145  __kmp_acquire_bootstrap_lock(&__kmp_task_team_lock);
3146  while ((task_team = __kmp_free_task_teams) != NULL) {
3147  __kmp_free_task_teams = task_team->tt.tt_next;
3148  task_team->tt.tt_next = NULL;
3149 
3150  // Free threads_data if necessary
3151  if (task_team->tt.tt_threads_data != NULL) {
3152  __kmp_free_task_threads_data(task_team);
3153  }
3154  __kmp_free(task_team);
3155  }
3156  __kmp_release_bootstrap_lock(&__kmp_task_team_lock);
3157  }
3158 }
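
// A minimal sketch of the lock-protected singly-linked free list that
// __kmp_allocate_task_team, __kmp_free_task_team and __kmp_reap_task_teams
// maintain through the tt_next pointers (hypothetical names, plain C++):
#include <mutex>
struct example_item {
  example_item *next;
};
static example_item *example_free_list = nullptr;
static std::mutex example_free_list_lock;
static example_item *example_take_from_free_list() { // cf. allocate
  std::lock_guard<std::mutex> guard(example_free_list_lock);
  example_item *item = example_free_list;
  if (item != nullptr) {
    example_free_list = item->next;
    item->next = nullptr;
  }
  return item; // nullptr means the caller must allocate a fresh one
}
static void example_return_to_free_list(example_item *item) { // cf. free
  std::lock_guard<std::mutex> guard(example_free_list_lock);
  item->next = example_free_list;
  example_free_list = item;
}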
3159 
3160 // __kmp_wait_to_unref_task_teams:
3161 // Some threads could still be in the fork barrier release code, possibly
3162 // trying to steal tasks. Wait for each thread to unreference its task team.
3163 void __kmp_wait_to_unref_task_teams(void) {
3164  kmp_info_t *thread;
3165  kmp_uint32 spins;
3166  int done;
3167 
3168  KMP_INIT_YIELD(spins);
3169 
3170  for (;;) {
3171  done = TRUE;
3172 
3173  // TODO: GEH - this may be wrong because some sync would be necessary
3174  // in case threads are added to the pool during the traversal. Need to
3175  // verify that the lock for the thread pool is held when calling this routine.
3176  for (thread = CCAST(kmp_info_t *, __kmp_thread_pool); thread != NULL;
3177  thread = thread->th.th_next_pool) {
3178 #if KMP_OS_WINDOWS
3179  DWORD exit_val;
3180 #endif
3181  if (TCR_PTR(thread->th.th_task_team) == NULL) {
3182  KA_TRACE(10, ("__kmp_wait_to_unref_task_team: T#%d task_team == NULL\n",
3183  __kmp_gtid_from_thread(thread)));
3184  continue;
3185  }
3186 #if KMP_OS_WINDOWS
3187  // TODO: GEH - add this check for Linux* OS / OS X* as well?
3188  if (!__kmp_is_thread_alive(thread, &exit_val)) {
3189  thread->th.th_task_team = NULL;
3190  continue;
3191  }
3192 #endif
3193 
3194  done = FALSE; // Because th_task_team pointer is not NULL for this thread
3195 
3196  KA_TRACE(10, ("__kmp_wait_to_unref_task_team: Waiting for T#%d to "
3197  "unreference task_team\n",
3198  __kmp_gtid_from_thread(thread)));
3199 
3200  if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) {
3201  volatile void *sleep_loc;
3202  // If the thread is sleeping, awaken it.
3203  if ((sleep_loc = TCR_PTR(CCAST(void *, thread->th.th_sleep_loc))) !=
3204  NULL) {
3205  KA_TRACE(
3206  10,
3207  ("__kmp_wait_to_unref_task_team: T#%d waking up thread T#%d\n",
3208  __kmp_gtid_from_thread(thread), __kmp_gtid_from_thread(thread)));
3209  __kmp_null_resume_wrapper(__kmp_gtid_from_thread(thread), sleep_loc);
3210  }
3211  }
3212  }
3213  if (done) {
3214  break;
3215  }
3216 
3217  // If we are oversubscribed, or have waited a bit (and library mode is
3218  // throughput), yield. Pause is in the following code.
3219  KMP_YIELD(TCR_4(__kmp_nth) > __kmp_avail_proc);
3220  KMP_YIELD_SPIN(spins); // Yields only if KMP_LIBRARY=throughput
3221  }
3222 }
3223 
3224 // __kmp_task_team_setup: Create a task_team for the current team, but use
3225 // an already created, unused one if it already exists.
3226 void __kmp_task_team_setup(kmp_info_t *this_thr, kmp_team_t *team, int always) {
3227  KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
3228 
3229  // If this task_team hasn't been created yet, allocate it. It will be used in
3230  // the region after the next.
3231  // If it exists, it is the current task team and shouldn't be touched yet as
3232  // it may still be in use.
3233  if (team->t.t_task_team[this_thr->th.th_task_state] == NULL &&
3234  (always || team->t.t_nproc > 1)) {
3235  team->t.t_task_team[this_thr->th.th_task_state] =
3236  __kmp_allocate_task_team(this_thr, team);
3237  KA_TRACE(20, ("__kmp_task_team_setup: Master T#%d created new task_team %p "
3238  "for team %d at parity=%d\n",
3239  __kmp_gtid_from_thread(this_thr),
3240  team->t.t_task_team[this_thr->th.th_task_state],
3241  ((team != NULL) ? team->t.t_id : -1),
3242  this_thr->th.th_task_state));
3243  }
3244 
3245  // After threads exit the release, they will call sync, and then point to this
3246  // other task_team; make sure it is allocated and properly initialized. As
3247  // threads spin in the barrier release phase, they will continue to use the
3248  // previous task_team struct(above), until they receive the signal to stop
3249  // checking for tasks (they can't safely reference the kmp_team_t struct,
3250  // which could be reallocated by the master thread). No task teams are formed
3251  // for serialized teams.
3252  if (team->t.t_nproc > 1) {
3253  int other_team = 1 - this_thr->th.th_task_state;
3254  if (team->t.t_task_team[other_team] == NULL) { // setup other team as well
3255  team->t.t_task_team[other_team] =
3256  __kmp_allocate_task_team(this_thr, team);
3257  KA_TRACE(20, ("__kmp_task_team_setup: Master T#%d created second new "
3258  "task_team %p for team %d at parity=%d\n",
3259  __kmp_gtid_from_thread(this_thr),
3260  team->t.t_task_team[other_team],
3261  ((team != NULL) ? team->t.t_id : -1), other_team));
3262  } else { // Leave the old task team struct in place for the upcoming region;
3263  // adjust as needed
3264  kmp_task_team_t *task_team = team->t.t_task_team[other_team];
3265  if (!task_team->tt.tt_active ||
3266  team->t.t_nproc != task_team->tt.tt_nproc) {
3267  TCW_4(task_team->tt.tt_nproc, team->t.t_nproc);
3268  TCW_4(task_team->tt.tt_found_tasks, FALSE);
3269 #if OMP_45_ENABLED
3270  TCW_4(task_team->tt.tt_found_proxy_tasks, FALSE);
3271 #endif
3272  KMP_ATOMIC_ST_REL(&task_team->tt.tt_unfinished_threads,
3273  team->t.t_nproc);
3274  TCW_4(task_team->tt.tt_active, TRUE);
3275  }
3276  // if team size has changed, the first thread to enable tasking will
3277  // realloc threads_data if necessary
3278  KA_TRACE(20, ("__kmp_task_team_setup: Master T#%d reset next task_team "
3279  "%p for team %d at parity=%d\n",
3280  __kmp_gtid_from_thread(this_thr),
3281  team->t.t_task_team[other_team],
3282  ((team != NULL) ? team->t.t_id : -1), other_team));
3283  }
3284  }
3285 }
3286 
3287 // __kmp_task_team_sync: Propagation of task team data from team to threads
3288 // which happens just after the release phase of a team barrier. This may be
3289 // called by any thread, but only for teams with # threads > 1.
3290 void __kmp_task_team_sync(kmp_info_t *this_thr, kmp_team_t *team) {
3291  KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
3292 
3293  // Toggle the th_task_state field, to switch which task_team this thread
3294  // refers to
3295  this_thr->th.th_task_state = 1 - this_thr->th.th_task_state;
3296  // It is now safe to propagate the task team pointer from the team struct to
3297  // the current thread.
3298  TCW_PTR(this_thr->th.th_task_team,
3299  team->t.t_task_team[this_thr->th.th_task_state]);
3300  KA_TRACE(20,
3301  ("__kmp_task_team_sync: Thread T#%d task team switched to task_team "
3302  "%p from Team #%d (parity=%d)\n",
3303  __kmp_gtid_from_thread(this_thr), this_thr->th.th_task_team,
3304  ((team != NULL) ? team->t.t_id : -1), this_thr->th.th_task_state));
3305 }
3306 
3307 // __kmp_task_team_wait: Master thread waits for outstanding tasks after the
3308 // barrier gather phase. Only called by master thread if #threads in team > 1 or
3309 // if proxy tasks were created.
3310 //
3311 // wait is a flag that defaults to 1 (see kmp.h), but waiting can be turned off
3312 // by passing in 0 optionally as the last argument. When wait is zero, master
3313 // thread does not wait for unfinished_threads to reach 0.
3314 void __kmp_task_team_wait(
3315  kmp_info_t *this_thr,
3316  kmp_team_t *team USE_ITT_BUILD_ARG(void *itt_sync_obj), int wait) {
3317  kmp_task_team_t *task_team = team->t.t_task_team[this_thr->th.th_task_state];
3318 
3319  KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
3320  KMP_DEBUG_ASSERT(task_team == this_thr->th.th_task_team);
3321 
3322  if ((task_team != NULL) && KMP_TASKING_ENABLED(task_team)) {
3323  if (wait) {
3324  KA_TRACE(20, ("__kmp_task_team_wait: Master T#%d waiting for all tasks "
3325  "(for unfinished_threads to reach 0) on task_team = %p\n",
3326  __kmp_gtid_from_thread(this_thr), task_team));
3327  // Worker threads may have dropped through to release phase, but could
3328  // still be executing tasks. Wait here for tasks to complete. To avoid
3329  // memory contention, only master thread checks termination condition.
3330  kmp_flag_32 flag(RCAST(std::atomic<kmp_uint32> *,
3331  &task_team->tt.tt_unfinished_threads),
3332  0U);
3333  flag.wait(this_thr, TRUE USE_ITT_BUILD_ARG(itt_sync_obj));
3334  }
3335  // Deactivate the old task team, so that the worker threads will stop
3336  // referencing it while spinning.
3337  KA_TRACE(
3338  20,
3339  ("__kmp_task_team_wait: Master T#%d deactivating task_team %p: "
3340  "setting active to false, setting local and team's pointer to NULL\n",
3341  __kmp_gtid_from_thread(this_thr), task_team));
3342 #if OMP_45_ENABLED
3343  KMP_DEBUG_ASSERT(task_team->tt.tt_nproc > 1 ||
3344  task_team->tt.tt_found_proxy_tasks == TRUE);
3345  TCW_SYNC_4(task_team->tt.tt_found_proxy_tasks, FALSE);
3346 #else
3347  KMP_DEBUG_ASSERT(task_team->tt.tt_nproc > 1);
3348 #endif
3349  KMP_CHECK_UPDATE(task_team->tt.tt_untied_task_encountered, 0);
3350  TCW_SYNC_4(task_team->tt.tt_active, FALSE);
3351  KMP_MB();
3352 
3353  TCW_PTR(this_thr->th.th_task_team, NULL);
3354  }
3355 }
3356 
3357 // __kmp_tasking_barrier:
3358 // This routine may only be called when __kmp_tasking_mode == tskm_extra_barrier.
3359 // Internal function to execute all tasks prior to a regular barrier or a join
3360 // barrier. It is a full barrier itself, which unfortunately turns regular
3361 // barriers into double barriers and join barriers into 1 1/2 barriers.
3362 void __kmp_tasking_barrier(kmp_team_t *team, kmp_info_t *thread, int gtid) {
3363  std::atomic<kmp_uint32> *spin = RCAST(
3364  std::atomic<kmp_uint32> *,
3365  &team->t.t_task_team[thread->th.th_task_state]->tt.tt_unfinished_threads);
3366  int flag = FALSE;
3367  KMP_DEBUG_ASSERT(__kmp_tasking_mode == tskm_extra_barrier);
3368 
3369 #if USE_ITT_BUILD
3370  KMP_FSYNC_SPIN_INIT(spin, NULL);
3371 #endif /* USE_ITT_BUILD */
3372  kmp_flag_32 spin_flag(spin, 0U);
3373  while (!spin_flag.execute_tasks(thread, gtid, TRUE,
3374  &flag USE_ITT_BUILD_ARG(NULL), 0)) {
3375 #if USE_ITT_BUILD
3376  // TODO: What about itt_sync_obj??
3377  KMP_FSYNC_SPIN_PREPARE(RCAST(void *, spin));
3378 #endif /* USE_ITT_BUILD */
3379 
3380  if (TCR_4(__kmp_global.g.g_done)) {
3381  if (__kmp_global.g.g_abort)
3382  __kmp_abort_thread();
3383  break;
3384  }
3385  KMP_YIELD(TRUE); // GH: We always yield here
3386  }
3387 #if USE_ITT_BUILD
3388  KMP_FSYNC_SPIN_ACQUIRED(RCAST(void *, spin));
3389 #endif /* USE_ITT_BUILD */
3390 }
3391 
3392 #if OMP_45_ENABLED
3393 
3394 // __kmp_give_task puts a task into a given thread queue if:
3395 // - the queue for that thread was created
3396 // - there's space in that queue
3397 // Because of this, __kmp_push_task needs to check if there's space after
3398 // getting the lock
3399 static bool __kmp_give_task(kmp_info_t *thread, kmp_int32 tid, kmp_task_t *task,
3400  kmp_int32 pass) {
3401  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
3402  kmp_task_team_t *task_team = taskdata->td_task_team;
3403 
3404  KA_TRACE(20, ("__kmp_give_task: trying to give task %p to thread %d.\n",
3405  taskdata, tid));
3406 
3407  // If task_team is NULL something went really bad...
3408  KMP_DEBUG_ASSERT(task_team != NULL);
3409 
3410  bool result = false;
3411  kmp_thread_data_t *thread_data = &task_team->tt.tt_threads_data[tid];
3412 
3413  if (thread_data->td.td_deque == NULL) {
3414  // There's no queue in this thread, go find another one
3415  // We're guaranteed that at least one thread has a queue
3416  KA_TRACE(30,
3417  ("__kmp_give_task: thread %d has no queue while giving task %p.\n",
3418  tid, taskdata));
3419  return result;
3420  }
3421 
3422  if (TCR_4(thread_data->td.td_deque_ntasks) >=
3423  TASK_DEQUE_SIZE(thread_data->td)) {
3424  KA_TRACE(
3425  30,
3426  ("__kmp_give_task: queue is full while giving task %p to thread %d.\n",
3427  taskdata, tid));
3428 
3429  // if this deque is bigger than the pass ratio give a chance to another
3430  // thread
3431  if (TASK_DEQUE_SIZE(thread_data->td) / INITIAL_TASK_DEQUE_SIZE >= pass)
3432  return result;
3433 
3434  __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
3435  __kmp_realloc_task_deque(thread, thread_data);
3436 
3437  } else {
3438 
3439  __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
3440 
3441  if (TCR_4(thread_data->td.td_deque_ntasks) >=
3442  TASK_DEQUE_SIZE(thread_data->td)) {
3443  KA_TRACE(30, ("__kmp_give_task: queue is full while giving task %p to "
3444  "thread %d.\n",
3445  taskdata, tid));
3446 
3447  // if this deque is bigger than the pass ratio give a chance to another
3448  // thread
3449  if (TASK_DEQUE_SIZE(thread_data->td) / INITIAL_TASK_DEQUE_SIZE >= pass)
3450  goto release_and_exit;
3451 
3452  __kmp_realloc_task_deque(thread, thread_data);
3453  }
3454  }
3455 
3456  // lock is held here, and there is space in the deque
3457 
3458  thread_data->td.td_deque[thread_data->td.td_deque_tail] = taskdata;
3459  // Wrap index.
3460  thread_data->td.td_deque_tail =
3461  (thread_data->td.td_deque_tail + 1) & TASK_DEQUE_MASK(thread_data->td);
3462  TCW_4(thread_data->td.td_deque_ntasks,
3463  TCR_4(thread_data->td.td_deque_ntasks) + 1);
3464 
3465  result = true;
3466  KA_TRACE(30, ("__kmp_give_task: successfully gave task %p to thread %d.\n",
3467  taskdata, tid));
3468 
3469 release_and_exit:
3470  __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
3471 
3472  return result;
3473 }
3474 
3475 /* The finish of a proxy task is divided into two pieces:
3476  - the top half is the part that can be done from a thread outside the team
3477  - the bottom half must be run from a thread within the team
3478 
3479  In order to run the bottom half, the task gets queued back into one of the
3480  threads of the team. Once the td_incomplete_child_tasks counter of the parent
3481  is decremented, the threads can leave the barriers. So, the bottom half needs
3482  to be queued before the counter is decremented. The top half is therefore
3483  divided into two parts:
3484  - things that can be run before queuing the bottom half
3485  - things that must be run after queuing the bottom half
3486 
3487  This creates a second race, as the bottom half can free the task before the
3488  second top half is executed. To avoid this, we use the
3489  td_incomplete_child_tasks counter of the proxy task to synchronize the top
3490  and bottom halves. */
3491 static void __kmp_first_top_half_finish_proxy(kmp_taskdata_t *taskdata) {
3492  KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
3493  KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
3494  KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 0);
3495  KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
3496 
3497  taskdata->td_flags.complete = 1; // mark the task as completed
3498 
3499  if (taskdata->td_taskgroup)
3500  KMP_ATOMIC_DEC(&taskdata->td_taskgroup->count);
3501 
3502  // Create an imaginary child for this task so that the bottom half cannot
3503  // release the task before we have completed the second top half
3504  KMP_ATOMIC_INC(&taskdata->td_incomplete_child_tasks);
3505 }
3506 
3507 static void __kmp_second_top_half_finish_proxy(kmp_taskdata_t *taskdata) {
3508  kmp_int32 children = 0;
3509 
3510  // Predecrement simulated by "- 1" calculation
3511  children =
3512  KMP_ATOMIC_DEC(&taskdata->td_parent->td_incomplete_child_tasks) - 1;
3513  KMP_DEBUG_ASSERT(children >= 0);
3514 
3515  // Remove the imaginary child
3516  KMP_ATOMIC_DEC(&taskdata->td_incomplete_child_tasks);
3517 }
3518 
3519 static void __kmp_bottom_half_finish_proxy(kmp_int32 gtid, kmp_task_t *ptask) {
3520  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
3521  kmp_info_t *thread = __kmp_threads[gtid];
3522 
3523  KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
3524  KMP_DEBUG_ASSERT(taskdata->td_flags.complete ==
3525  1); // top half must run before bottom half
3526 
3527  // We need to wait to make sure the top half is finished
3528  // Spinning here should be ok as this should happen quickly
3529  while (KMP_ATOMIC_LD_ACQ(&taskdata->td_incomplete_child_tasks) > 0)
3530  ;
3531 
3532  __kmp_release_deps(gtid, taskdata);
3533  __kmp_free_task_and_ancestors(gtid, taskdata, thread);
3534 }
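
// A minimal sketch of the top-half / bottom-half handshake implemented above
// (hypothetical names, plain C++ atomics rather than the KMP_ATOMIC macros):
// the first top half adds an "imaginary child" reference, so the bottom half
// cannot free the task until the second top half has removed it again.
#include <atomic>
struct example_proxy {
  std::atomic<int> incomplete_children{0};
};
static void example_first_top_half(example_proxy *p) {
  p->incomplete_children.fetch_add(1); // imaginary child blocks an early free
}
static void example_second_top_half(example_proxy *p) {
  p->incomplete_children.fetch_sub(1); // drop the imaginary child
}
static void example_bottom_half(example_proxy *p) {
  // wait until the second top half has run; this should resolve quickly
  while (p->incomplete_children.load() > 0) {
  }
  // ... now it is safe to release dependences and free the task ...
}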
3535 
3544 void __kmpc_proxy_task_completed(kmp_int32 gtid, kmp_task_t *ptask) {
3545  KMP_DEBUG_ASSERT(ptask != NULL);
3546  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
3547  KA_TRACE(
3548  10, ("__kmp_proxy_task_completed(enter): T#%d proxy task %p completing\n",
3549  gtid, taskdata));
3550 
3551  KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
3552 
3553  __kmp_first_top_half_finish_proxy(taskdata);
3554  __kmp_second_top_half_finish_proxy(taskdata);
3555  __kmp_bottom_half_finish_proxy(gtid, ptask);
3556 
3557  KA_TRACE(10,
3558  ("__kmp_proxy_task_completed(exit): T#%d proxy task %p completing\n",
3559  gtid, taskdata));
3560 }
3561 
3569 void __kmpc_proxy_task_completed_ooo(kmp_task_t *ptask) {
3570  KMP_DEBUG_ASSERT(ptask != NULL);
3571  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
3572 
3573  KA_TRACE(
3574  10,
3575  ("__kmp_proxy_task_completed_ooo(enter): proxy task completing ooo %p\n",
3576  taskdata));
3577 
3578  KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
3579 
3580  __kmp_first_top_half_finish_proxy(taskdata);
3581 
3582  // Enqueue the task so that the bottom half of its completion runs on a
3583  // thread within the corresponding team
3584  kmp_team_t *team = taskdata->td_team;
3585  kmp_int32 nthreads = team->t.t_nproc;
3586  kmp_info_t *thread;
3587 
3588  // This should be similar to start_k = __kmp_get_random( thread ) % nthreads
3589  // but we cannot use __kmp_get_random here
3590  kmp_int32 start_k = 0;
3591  kmp_int32 pass = 1;
3592  kmp_int32 k = start_k;
3593 
3594  do {
3595  // For now we're just linearly trying to find a thread
3596  thread = team->t.t_threads[k];
3597  k = (k + 1) % nthreads;
3598 
3599  // we did a full pass through all the threads
3600  if (k == start_k)
3601  pass = pass << 1;
3602 
3603  } while (!__kmp_give_task(thread, k, ptask, pass));
3604 
3605  __kmp_second_top_half_finish_proxy(taskdata);
3606 
3607  KA_TRACE(
3608  10,
3609  ("__kmp_proxy_task_completed_ooo(exit): proxy task completing ooo %p\n",
3610  taskdata));
3611 }
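
// A minimal sketch of the placement loop above (hypothetical names, plain
// C++): threads are tried round-robin, and each completed sweep doubles
// 'pass' so that increasingly full deques become acceptable targets for
// __kmp_give_task.
static int example_place_task(int nthreads,
                              bool (*try_give)(int tid, int pass)) {
  int start_k = 0; // the runtime would prefer a pseudo-random start here
  int k = start_k;
  int pass = 1;
  for (;;) {
    int tid = k;
    k = (k + 1) % nthreads;
    if (k == start_k)
      pass <<= 1; // a full sweep failed: relax the size threshold
    if (try_give(tid, pass))
      return tid; // the task landed in this thread's deque
  }
}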
3612 
3613 // __kmp_task_dup_alloc: Allocate the taskdata and make a copy of source task
3614 // for taskloop
3615 //
3616 // thread: allocating thread
3617 // task_src: pointer to source task to be duplicated
3618 // returns: a pointer to the allocated kmp_task_t structure (task).
3619 kmp_task_t *__kmp_task_dup_alloc(kmp_info_t *thread, kmp_task_t *task_src) {
3620  kmp_task_t *task;
3621  kmp_taskdata_t *taskdata;
3622  kmp_taskdata_t *taskdata_src;
3623  kmp_taskdata_t *parent_task = thread->th.th_current_task;
3624  size_t shareds_offset;
3625  size_t task_size;
3626 
3627  KA_TRACE(10, ("__kmp_task_dup_alloc(enter): Th %p, source task %p\n", thread,
3628  task_src));
3629  taskdata_src = KMP_TASK_TO_TASKDATA(task_src);
3630  KMP_DEBUG_ASSERT(taskdata_src->td_flags.proxy ==
3631  TASK_FULL); // it should not be proxy task
3632  KMP_DEBUG_ASSERT(taskdata_src->td_flags.tasktype == TASK_EXPLICIT);
3633  task_size = taskdata_src->td_size_alloc;
3634 
3635  // Allocate a kmp_taskdata_t block and a kmp_task_t block.
3636  KA_TRACE(30, ("__kmp_task_dup_alloc: Th %p, malloc size %ld\n", thread,
3637  task_size));
3638 #if USE_FAST_MEMORY
3639  taskdata = (kmp_taskdata_t *)__kmp_fast_allocate(thread, task_size);
3640 #else
3641  taskdata = (kmp_taskdata_t *)__kmp_thread_malloc(thread, task_size);
3642 #endif /* USE_FAST_MEMORY */
3643  KMP_MEMCPY(taskdata, taskdata_src, task_size);
3644 
3645  task = KMP_TASKDATA_TO_TASK(taskdata);
3646 
3647  // Initialize new task (only specific fields not affected by memcpy)
3648  taskdata->td_task_id = KMP_GEN_TASK_ID();
3649  if (task->shareds != NULL) { // need to set up shareds pointer
3650  shareds_offset = (char *)task_src->shareds - (char *)taskdata_src;
3651  task->shareds = &((char *)taskdata)[shareds_offset];
3652  KMP_DEBUG_ASSERT((((kmp_uintptr_t)task->shareds) & (sizeof(void *) - 1)) ==
3653  0);
3654  }
3655  taskdata->td_alloc_thread = thread;
3656  taskdata->td_parent = parent_task;
3657  taskdata->td_taskgroup =
3658  parent_task
3659  ->td_taskgroup; // task inherits the taskgroup from the parent task
3660 
3661  // Only need to keep track of child task counts if team parallel and tasking
3662  // not serialized
3663  if (!(taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser)) {
3664  KMP_ATOMIC_INC(&parent_task->td_incomplete_child_tasks);
3665  if (parent_task->td_taskgroup)
3666  KMP_ATOMIC_INC(&parent_task->td_taskgroup->count);
3667  // Only need to keep track of allocated child tasks for explicit tasks since
3668  // implicit not deallocated
3669  if (taskdata->td_parent->td_flags.tasktype == TASK_EXPLICIT)
3670  KMP_ATOMIC_INC(&taskdata->td_parent->td_allocated_child_tasks);
3671  }
3672 
3673  KA_TRACE(20,
3674  ("__kmp_task_dup_alloc(exit): Th %p, created task %p, parent=%p\n",
3675  thread, taskdata, taskdata->td_parent));
3676 #if OMPT_SUPPORT
3677  if (UNLIKELY(ompt_enabled.enabled))
3678  __ompt_task_init(taskdata, thread->th.th_info.ds.ds_gtid);
3679 #endif
3680  return task;
3681 }
3682 
3683 // Routine optionally generated by the compiler for setting the lastprivate flag
3684 // and calling needed constructors for private/firstprivate objects
3685 // (used to form taskloop tasks from pattern task)
3686 // Parameters: dest task, src task, lastprivate flag.
3687 typedef void (*p_task_dup_t)(kmp_task_t *, kmp_task_t *, kmp_int32);
3688 
3689 KMP_BUILD_ASSERT(sizeof(long) == 4 || sizeof(long) == 8);
3690 
3691 // class to encapsulate manipulating loop bounds in a taskloop task.
3692 // this abstracts away the Intel vs GOMP taskloop interface for setting/getting
3693 // the loop bound variables.
3694 class kmp_taskloop_bounds_t {
3695  kmp_task_t *task;
3696  const kmp_taskdata_t *taskdata;
3697  size_t lower_offset;
3698  size_t upper_offset;
3699 
3700 public:
3701  kmp_taskloop_bounds_t(kmp_task_t *_task, kmp_uint64 *lb, kmp_uint64 *ub)
3702  : task(_task), taskdata(KMP_TASK_TO_TASKDATA(task)),
3703  lower_offset((char *)lb - (char *)task),
3704  upper_offset((char *)ub - (char *)task) {
3705  KMP_DEBUG_ASSERT((char *)lb > (char *)_task);
3706  KMP_DEBUG_ASSERT((char *)ub > (char *)_task);
3707  }
3708  kmp_taskloop_bounds_t(kmp_task_t *_task, const kmp_taskloop_bounds_t &bounds)
3709  : task(_task), taskdata(KMP_TASK_TO_TASKDATA(_task)),
3710  lower_offset(bounds.lower_offset), upper_offset(bounds.upper_offset) {}
3711  size_t get_lower_offset() const { return lower_offset; }
3712  size_t get_upper_offset() const { return upper_offset; }
3713  kmp_uint64 get_lb() const {
3714  kmp_int64 retval;
3715 #if defined(KMP_GOMP_COMPAT)
3716  // Intel task just returns the lower bound normally
3717  if (!taskdata->td_flags.native) {
3718  retval = *(kmp_int64 *)((char *)task + lower_offset);
3719  } else {
3720  // GOMP task has to take into account the sizeof(long)
3721  if (taskdata->td_size_loop_bounds == 4) {
3722  kmp_int32 *lb = RCAST(kmp_int32 *, task->shareds);
3723  retval = (kmp_int64)*lb;
3724  } else {
3725  kmp_int64 *lb = RCAST(kmp_int64 *, task->shareds);
3726  retval = (kmp_int64)*lb;
3727  }
3728  }
3729 #else
3730  retval = *(kmp_int64 *)((char *)task + lower_offset);
3731 #endif // defined(KMP_GOMP_COMPAT)
3732  return retval;
3733  }
3734  kmp_uint64 get_ub() const {
3735  kmp_int64 retval;
3736 #if defined(KMP_GOMP_COMPAT)
3737  // Intel task just returns the upper bound normally
3738  if (!taskdata->td_flags.native) {
3739  retval = *(kmp_int64 *)((char *)task + upper_offset);
3740  } else {
3741  // GOMP task has to take into account the sizeof(long)
3742  if (taskdata->td_size_loop_bounds == 4) {
3743  kmp_int32 *ub = RCAST(kmp_int32 *, task->shareds) + 1;
3744  retval = (kmp_int64)*ub;
3745  } else {
3746  kmp_int64 *ub = RCAST(kmp_int64 *, task->shareds) + 1;
3747  retval = (kmp_int64)*ub;
3748  }
3749  }
3750 #else
3751  retval = *(kmp_int64 *)((char *)task + upper_offset);
3752 #endif // defined(KMP_GOMP_COMPAT)
3753  return retval;
3754  }
3755  void set_lb(kmp_uint64 lb) {
3756 #if defined(KMP_GOMP_COMPAT)
3757  // Intel task just sets the lower bound normally
3758  if (!taskdata->td_flags.native) {
3759  *(kmp_uint64 *)((char *)task + lower_offset) = lb;
3760  } else {
3761  // GOMP task has to take into account the sizeof(long)
3762  if (taskdata->td_size_loop_bounds == 4) {
3763  kmp_uint32 *lower = RCAST(kmp_uint32 *, task->shareds);
3764  *lower = (kmp_uint32)lb;
3765  } else {
3766  kmp_uint64 *lower = RCAST(kmp_uint64 *, task->shareds);
3767  *lower = (kmp_uint64)lb;
3768  }
3769  }
3770 #else
3771  *(kmp_uint64 *)((char *)task + lower_offset) = lb;
3772 #endif // defined(KMP_GOMP_COMPAT)
3773  }
3774  void set_ub(kmp_uint64 ub) {
3775 #if defined(KMP_GOMP_COMPAT)
3776  // Intel task just sets the upper bound normally
3777  if (!taskdata->td_flags.native) {
3778  *(kmp_uint64 *)((char *)task + upper_offset) = ub;
3779  } else {
3780  // GOMP task has to take into account the sizeof(long)
3781  if (taskdata->td_size_loop_bounds == 4) {
3782  kmp_uint32 *upper = RCAST(kmp_uint32 *, task->shareds) + 1;
3783  *upper = (kmp_uint32)ub;
3784  } else {
3785  kmp_uint64 *upper = RCAST(kmp_uint64 *, task->shareds) + 1;
3786  *upper = (kmp_uint64)ub;
3787  }
3788  }
3789 #else
3790  *(kmp_uint64 *)((char *)task + upper_offset) = ub;
3791 #endif // defined(KMP_GOMP_COMPAT)
3792  }
3793 };
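
// A minimal sketch of the offset-based access the class above performs for
// the non-GOMP path (hypothetical names, plain C++): the loop bounds live
// inside the task object, so they are reached via byte offsets remembered
// relative to the task pointer.
#include <cstddef>
#include <cstdint>
#include <cstring>
static std::uint64_t example_get_bound(const void *task, std::size_t offset) {
  std::uint64_t value;
  std::memcpy(&value, static_cast<const char *>(task) + offset, sizeof(value));
  return value;
}
static void example_set_bound(void *task, std::size_t offset,
                              std::uint64_t value) {
  std::memcpy(static_cast<char *>(task) + offset, &value, sizeof(value));
}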
3794 
3795 // __kmp_taskloop_linear: Start tasks of the taskloop linearly
3796 //
3797 // loc Source location information
3798 // gtid Global thread ID
3799 // task Pattern task, exposes the loop iteration range
3800 // lb Pointer to loop lower bound in task structure
3801 // ub Pointer to loop upper bound in task structure
3802 // st Loop stride
3803 // ub_glob Global upper bound (used for lastprivate check)
3804 // num_tasks Number of tasks to execute
3805 // grainsize Number of loop iterations per task
3806 // extras Number of chunks with grainsize+1 iterations
3807 // tc Iterations count
3808 // task_dup Tasks duplication routine
3809 // codeptr_ra Return address for OMPT events
3810 void __kmp_taskloop_linear(ident_t *loc, int gtid, kmp_task_t *task,
3811  kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st,
3812  kmp_uint64 ub_glob, kmp_uint64 num_tasks,
3813  kmp_uint64 grainsize, kmp_uint64 extras,
3814  kmp_uint64 tc,
3815 #if OMPT_SUPPORT
3816  void *codeptr_ra,
3817 #endif
3818  void *task_dup) {
3819  KMP_COUNT_BLOCK(OMP_TASKLOOP);
3820  KMP_TIME_PARTITIONED_BLOCK(OMP_taskloop_scheduling);
3821  p_task_dup_t ptask_dup = (p_task_dup_t)task_dup;
3822  // compiler provides global bounds here
3823  kmp_taskloop_bounds_t task_bounds(task, lb, ub);
3824  kmp_uint64 lower = task_bounds.get_lb();
3825  kmp_uint64 upper = task_bounds.get_ub();
3826  kmp_uint64 i;
3827  kmp_info_t *thread = __kmp_threads[gtid];
3828  kmp_taskdata_t *current_task = thread->th.th_current_task;
3829  kmp_task_t *next_task;
3830  kmp_int32 lastpriv = 0;
3831 
3832  KMP_DEBUG_ASSERT(tc == num_tasks * grainsize + extras);
3833  KMP_DEBUG_ASSERT(num_tasks > extras);
3834  KMP_DEBUG_ASSERT(num_tasks > 0);
3835  KA_TRACE(20, ("__kmp_taskloop_linear: T#%d: %lld tasks, grainsize %lld, "
3836  "extras %lld, i=%lld,%lld(%d)%lld, dup %p\n",
3837  gtid, num_tasks, grainsize, extras, lower, upper, ub_glob, st,
3838  task_dup));
3839 
3840  // Launch num_tasks tasks, assigning grainsize iterations to each task
3841  for (i = 0; i < num_tasks; ++i) {
3842  kmp_uint64 chunk_minus_1;
3843  if (extras == 0) {
3844  chunk_minus_1 = grainsize - 1;
3845  } else {
3846  chunk_minus_1 = grainsize;
3847  --extras; // the first 'extras' tasks get a bigger chunk (grainsize+1)
3848  }
3849  upper = lower + st * chunk_minus_1;
3850  if (i == num_tasks - 1) {
3851  // schedule the last task, set lastprivate flag if needed
3852  if (st == 1) { // most common case
3853  KMP_DEBUG_ASSERT(upper == *ub);
3854  if (upper == ub_glob)
3855  lastpriv = 1;
3856  } else if (st > 0) { // positive loop stride
3857  KMP_DEBUG_ASSERT((kmp_uint64)st > *ub - upper);
3858  if ((kmp_uint64)st > ub_glob - upper)
3859  lastpriv = 1;
3860  } else { // negative loop stride
3861  KMP_DEBUG_ASSERT(upper + st < *ub);
3862  if (upper - ub_glob < (kmp_uint64)(-st))
3863  lastpriv = 1;
3864  }
3865  }
3866  next_task = __kmp_task_dup_alloc(thread, task); // allocate new task
3867  kmp_taskdata_t *next_taskdata = KMP_TASK_TO_TASKDATA(next_task);
3868  kmp_taskloop_bounds_t next_task_bounds =
3869  kmp_taskloop_bounds_t(next_task, task_bounds);
3870 
3871  // adjust task-specific bounds
3872  next_task_bounds.set_lb(lower);
3873  if (next_taskdata->td_flags.native) {
3874  next_task_bounds.set_ub(upper + (st > 0 ? 1 : -1));
3875  } else {
3876  next_task_bounds.set_ub(upper);
3877  }
3878  if (ptask_dup != NULL) // set lastprivate flag, construct firstprivates, etc.
3879  ptask_dup(next_task, task, lastpriv);
3880  KA_TRACE(40,
3881  ("__kmp_taskloop_linear: T#%d; task #%llu: task %p: lower %lld, "
3882  "upper %lld stride %lld, (offsets %p %p)\n",
3883  gtid, i, next_task, lower, upper, st,
3884  next_task_bounds.get_lower_offset(),
3885  next_task_bounds.get_upper_offset()));
3886 #if OMPT_SUPPORT
3887  __kmp_omp_taskloop_task(NULL, gtid, next_task,
3888  codeptr_ra); // schedule new task
3889 #else
3890  __kmp_omp_task(gtid, next_task, true); // schedule new task
3891 #endif
3892  lower = upper + st; // adjust lower bound for the next iteration
3893  }
3894  // free the pattern task and exit
3895  __kmp_task_start(gtid, task, current_task); // make internal bookkeeping
3896  // do not execute the pattern task, just do internal bookkeeping
3897  __kmp_task_finish<false>(gtid, task, current_task);
3898 }
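
// A minimal worked sketch of the chunking performed above (hypothetical
// numbers): with tc = 10 iterations and num_tasks = 3, grainsize = 10 / 3 = 3
// and extras = 10 % 3 = 1, so the chunks are 4, 3, 3: the first 'extras'
// tasks take grainsize + 1 iterations, and the total stays equal to
// num_tasks * grainsize + extras.
static void example_chunk_sizes(unsigned long long tc,
                                unsigned long long num_tasks,
                                unsigned long long *chunks /* num_tasks slots */) {
  unsigned long long grainsize = tc / num_tasks;
  unsigned long long extras = tc % num_tasks;
  for (unsigned long long i = 0; i < num_tasks; ++i)
    chunks[i] = grainsize + (i < extras ? 1 : 0);
}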
3899 
3900 // Structure to keep taskloop parameters for auxiliary task
3901 // kept in the shareds of the task structure.
3902 typedef struct __taskloop_params {
3903  kmp_task_t *task;
3904  kmp_uint64 *lb;
3905  kmp_uint64 *ub;
3906  void *task_dup;
3907  kmp_int64 st;
3908  kmp_uint64 ub_glob;
3909  kmp_uint64 num_tasks;
3910  kmp_uint64 grainsize;
3911  kmp_uint64 extras;
3912  kmp_uint64 tc;
3913  kmp_uint64 num_t_min;
3914 #if OMPT_SUPPORT
3915  void *codeptr_ra;
3916 #endif
3917 } __taskloop_params_t;
3918 
3919 void __kmp_taskloop_recur(ident_t *, int, kmp_task_t *, kmp_uint64 *,
3920  kmp_uint64 *, kmp_int64, kmp_uint64, kmp_uint64,
3921  kmp_uint64, kmp_uint64, kmp_uint64, kmp_uint64,
3922 #if OMPT_SUPPORT
3923  void *,
3924 #endif
3925  void *);
3926 
3927 // Execute part of the taskloop submitted as a task.
3928 int __kmp_taskloop_task(int gtid, void *ptask) {
3929  __taskloop_params_t *p =
3930  (__taskloop_params_t *)((kmp_task_t *)ptask)->shareds;
3931  kmp_task_t *task = p->task;
3932  kmp_uint64 *lb = p->lb;
3933  kmp_uint64 *ub = p->ub;
3934  void *task_dup = p->task_dup;
3935  // p_task_dup_t ptask_dup = (p_task_dup_t)task_dup;
3936  kmp_int64 st = p->st;
3937  kmp_uint64 ub_glob = p->ub_glob;
3938  kmp_uint64 num_tasks = p->num_tasks;
3939  kmp_uint64 grainsize = p->grainsize;
3940  kmp_uint64 extras = p->extras;
3941  kmp_uint64 tc = p->tc;
3942  kmp_uint64 num_t_min = p->num_t_min;
3943 #if OMPT_SUPPORT
3944  void *codeptr_ra = p->codeptr_ra;
3945 #endif
3946 #if KMP_DEBUG
3947  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
3948  KMP_DEBUG_ASSERT(task != NULL);
3949  KA_TRACE(20, ("__kmp_taskloop_task: T#%d, task %p: %lld tasks, grainsize"
3950  " %lld, extras %lld, i=%lld,%lld(%d), dup %p\n",
3951  gtid, taskdata, num_tasks, grainsize, extras, *lb, *ub, st,
3952  task_dup));
3953 #endif
3954  KMP_DEBUG_ASSERT(num_tasks * 2 + 1 > num_t_min);
3955  if (num_tasks > num_t_min)
3956  __kmp_taskloop_recur(NULL, gtid, task, lb, ub, st, ub_glob, num_tasks,
3957  grainsize, extras, tc, num_t_min,
3958 #if OMPT_SUPPORT
3959  codeptr_ra,
3960 #endif
3961  task_dup);
3962  else
3963  __kmp_taskloop_linear(NULL, gtid, task, lb, ub, st, ub_glob, num_tasks,
3964  grainsize, extras, tc,
3965 #if OMPT_SUPPORT
3966  codeptr_ra,
3967 #endif
3968  task_dup);
3969 
3970  KA_TRACE(40, ("__kmp_taskloop_task(exit): T#%d\n", gtid));
3971  return 0;
3972 }
3973 
3974 // Schedule part of the taskloop as a task,
3975 // execute the rest of the taskloop.
3976 //
3977 // loc Source location information
3978 // gtid Global thread ID
3979 // task Pattern task, exposes the loop iteration range
3980 // lb Pointer to loop lower bound in task structure
3981 // ub Pointer to loop upper bound in task structure
3982 // st Loop stride
3983 // ub_glob Global upper bound (used for lastprivate check)
3984 // num_tasks Number of tasks to execute
3985 // grainsize Number of loop iterations per task
3986 // extras Number of chunks with grainsize+1 iterations
3987 // tc Iterations count
3988 // num_t_min Threshold to launch tasks recursively
3989 // task_dup Tasks duplication routine
3990 // codeptr_ra Return address for OMPT events
3991 void __kmp_taskloop_recur(ident_t *loc, int gtid, kmp_task_t *task,
3992  kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st,
3993  kmp_uint64 ub_glob, kmp_uint64 num_tasks,
3994  kmp_uint64 grainsize, kmp_uint64 extras,
3995  kmp_uint64 tc, kmp_uint64 num_t_min,
3996 #if OMPT_SUPPORT
3997  void *codeptr_ra,
3998 #endif
3999  void *task_dup) {
4000 #if KMP_DEBUG
4001  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
4002  KMP_DEBUG_ASSERT(task != NULL);
4003  KMP_DEBUG_ASSERT(num_tasks > num_t_min);
4004  KA_TRACE(20, ("__kmp_taskloop_recur: T#%d, task %p: %lld tasks, grainsize"
4005  " %lld, extras %lld, i=%lld,%lld(%d), dup %p\n",
4006  gtid, taskdata, num_tasks, grainsize, extras, *lb, *ub, st,
4007  task_dup));
4008 #endif
4009  p_task_dup_t ptask_dup = (p_task_dup_t)task_dup;
4010  kmp_uint64 lower = *lb;
4011  kmp_info_t *thread = __kmp_threads[gtid];
4012  // kmp_taskdata_t *current_task = thread->th.th_current_task;
4013  kmp_task_t *next_task;
4014  size_t lower_offset =
4015  (char *)lb - (char *)task; // remember offset of lb in the task structure
4016  size_t upper_offset =
4017  (char *)ub - (char *)task; // remember offset of ub in the task structure
4018 
4019  KMP_DEBUG_ASSERT(tc == num_tasks * grainsize + extras);
4020  KMP_DEBUG_ASSERT(num_tasks > extras);
4021  KMP_DEBUG_ASSERT(num_tasks > 0);
4022 
4023  // split the loop into two halves
4024  kmp_uint64 lb1, ub0, tc0, tc1, ext0, ext1;
4025  kmp_uint64 gr_size0 = grainsize;
4026  kmp_uint64 n_tsk0 = num_tasks >> 1; // num_tasks/2 to execute
4027  kmp_uint64 n_tsk1 = num_tasks - n_tsk0; // to schedule as a task
4028  if (n_tsk0 <= extras) {
4029  gr_size0++; // integrate extras into grainsize
4030  ext0 = 0; // no extra iters in 1st half
4031  ext1 = extras - n_tsk0; // remaining extras
4032  tc0 = gr_size0 * n_tsk0;
4033  tc1 = tc - tc0;
4034  } else { // n_tsk0 > extras
4035  ext1 = 0; // no extra iters in 2nd half
4036  ext0 = extras;
4037  tc1 = grainsize * n_tsk1;
4038  tc0 = tc - tc1;
4039  }
4040  ub0 = lower + st * (tc0 - 1);
4041  lb1 = ub0 + st;
4042 
4043  // create pattern task for 2nd half of the loop
4044  next_task = __kmp_task_dup_alloc(thread, task); // duplicate the task
4045  // adjust lower bound (upper bound is not changed) for the 2nd half
4046  *(kmp_uint64 *)((char *)next_task + lower_offset) = lb1;
4047  if (ptask_dup != NULL) // construct firstprivates, etc.
4048  ptask_dup(next_task, task, 0);
4049  *ub = ub0; // adjust upper bound for the 1st half
4050 
4051  // create auxiliary task for 2nd half of the loop
4052  kmp_task_t *new_task =
4053  __kmpc_omp_task_alloc(loc, gtid, 1, 3 * sizeof(void *),
4054  sizeof(__taskloop_params_t), &__kmp_taskloop_task);
4055  __taskloop_params_t *p = (__taskloop_params_t *)new_task->shareds;
4056  p->task = next_task;
4057  p->lb = (kmp_uint64 *)((char *)next_task + lower_offset);
4058  p->ub = (kmp_uint64 *)((char *)next_task + upper_offset);
4059  p->task_dup = task_dup;
4060  p->st = st;
4061  p->ub_glob = ub_glob;
4062  p->num_tasks = n_tsk1;
4063  p->grainsize = grainsize;
4064  p->extras = ext1;
4065  p->tc = tc1;
4066  p->num_t_min = num_t_min;
4067 #if OMPT_SUPPORT
4068  p->codeptr_ra = codeptr_ra;
4069 #endif
4070 
4071 #if OMPT_SUPPORT
4072  // schedule new task with correct return address for OMPT events
4073  __kmp_omp_taskloop_task(NULL, gtid, new_task, codeptr_ra);
4074 #else
4075  __kmp_omp_task(gtid, new_task, true); // schedule new task
4076 #endif
4077 
4078  // execute the 1st half of current subrange
4079  if (n_tsk0 > num_t_min)
4080  __kmp_taskloop_recur(loc, gtid, task, lb, ub, st, ub_glob, n_tsk0, gr_size0,
4081  ext0, tc0, num_t_min,
4082 #if OMPT_SUPPORT
4083  codeptr_ra,
4084 #endif
4085  task_dup);
4086  else
4087  __kmp_taskloop_linear(loc, gtid, task, lb, ub, st, ub_glob, n_tsk0,
4088  gr_size0, ext0, tc0,
4089 #if OMPT_SUPPORT
4090  codeptr_ra,
4091 #endif
4092  task_dup);
4093 
4094  KA_TRACE(40, ("__kmp_taskloop_recur(exit): T#%d\n", gtid));
4095 }
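
// A minimal worked sketch of the half/half split above (hypothetical
// numbers): with num_tasks = 5, grainsize = 3, extras = 2 (so tc = 17),
// n_tsk0 = 2 and n_tsk1 = 3; since n_tsk0 <= extras, the first half folds
// the extras into its grainsize (gr_size0 = 4, ext0 = 0, tc0 = 8) and the
// second half keeps the remainder (ext1 = 0, tc1 = 9), so tc0 + tc1 == tc.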
4096 
4113 void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int if_val,
4114  kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup,
4115  int sched, kmp_uint64 grainsize, void *task_dup) {
4116  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
4117  KMP_DEBUG_ASSERT(task != NULL);
4118 
4119  if (nogroup == 0) {
4120 #if OMPT_SUPPORT && OMPT_OPTIONAL
4121  OMPT_STORE_RETURN_ADDRESS(gtid);
4122 #endif
4123  __kmpc_taskgroup(loc, gtid);
4124  }
4125 
4126  // =========================================================================
4127  // calculate loop parameters
4128  kmp_taskloop_bounds_t task_bounds(task, lb, ub);
4129  kmp_uint64 tc;
4130  // compiler provides global bounds here
4131  kmp_uint64 lower = task_bounds.get_lb();
4132  kmp_uint64 upper = task_bounds.get_ub();
4133  kmp_uint64 ub_glob = upper; // global upper used to calc lastprivate flag
4134  kmp_uint64 num_tasks = 0, extras = 0;
4135  kmp_uint64 num_tasks_min = __kmp_taskloop_min_tasks;
4136  kmp_info_t *thread = __kmp_threads[gtid];
4137  kmp_taskdata_t *current_task = thread->th.th_current_task;
4138 
4139  KA_TRACE(20, ("__kmpc_taskloop: T#%d, task %p, lb %lld, ub %lld, st %lld, "
4140  "grain %llu(%d), dup %p\n",
4141  gtid, taskdata, lower, upper, st, grainsize, sched, task_dup));
4142 
4143  // compute trip count
4144  if (st == 1) { // most common case
4145  tc = upper - lower + 1;
4146  } else if (st < 0) {
4147  tc = (lower - upper) / (-st) + 1;
4148  } else { // st > 0
4149  tc = (upper - lower) / st + 1;
4150  }
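  // Worked example for the trip count just computed (hypothetical numbers):
  // lower = 0, upper = 9, st = 3 gives tc = (9 - 0) / 3 + 1 = 4, i.e. the
  // iterations 0, 3, 6, 9; for st = 1 the bounds are inclusive, so
  // tc = upper - lower + 1.
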
4151  if (tc == 0) {
4152  KA_TRACE(20, ("__kmpc_taskloop(exit): T#%d zero-trip loop\n", gtid));
4153  // free the pattern task and exit
4154  __kmp_task_start(gtid, task, current_task);
4155  // do not execute anything for zero-trip loop
4156  __kmp_task_finish<false>(gtid, task, current_task);
4157  return;
4158  }
4159 
4160 #if OMPT_SUPPORT && OMPT_OPTIONAL
4161  ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
4162  ompt_task_info_t *task_info = __ompt_get_task_info_object(0);
4163  if (ompt_enabled.ompt_callback_work) {
4164  ompt_callbacks.ompt_callback(ompt_callback_work)(
4165  ompt_work_taskloop, ompt_scope_begin, &(team_info->parallel_data),
4166  &(task_info->task_data), tc, OMPT_GET_RETURN_ADDRESS(0));
4167  }
4168 #endif
4169 
4170  if (num_tasks_min == 0)
4171  // TODO: can we choose a better default heuristic?
4172  num_tasks_min =
4173  KMP_MIN(thread->th.th_team_nproc * 10, INITIAL_TASK_DEQUE_SIZE);
4174 
4175  // compute num_tasks/grainsize based on the input provided
4176  switch (sched) {
4177  case 0: // no schedule clause specified, we can choose the default
4178  // let's try to schedule (team_size*10) tasks
4179  grainsize = thread->th.th_team_nproc * 10;
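    // no break: fall through to the num_tasks case below, where the value
    // just stored in 'grainsize' is treated as the requested number of tasks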
4180  case 2: // num_tasks provided
4181  if (grainsize > tc) {
4182  num_tasks = tc; // too big num_tasks requested, adjust values
4183  grainsize = 1;
4184  extras = 0;
4185  } else {
4186  num_tasks = grainsize;
4187  grainsize = tc / num_tasks;
4188  extras = tc % num_tasks;
4189  }
4190  break;
4191  case 1: // grainsize provided
4192  if (grainsize > tc) {
4193  num_tasks = 1; // too big grainsize requested, adjust values
4194  grainsize = tc;
4195  extras = 0;
4196  } else {
4197  num_tasks = tc / grainsize;
4198  // adjust grainsize for balanced distribution of iterations
4199  grainsize = tc / num_tasks;
4200  extras = tc % num_tasks;
4201  }
4202  break;
4203  default:
4204  KMP_ASSERT2(0, "unknown scheduling of taskloop");
4205  }
4206  KMP_DEBUG_ASSERT(tc == num_tasks * grainsize + extras);
4207  KMP_DEBUG_ASSERT(num_tasks > extras);
4208  KMP_DEBUG_ASSERT(num_tasks > 0);
4209  // =========================================================================
4210 
4211  // check the if-clause value first
4212  // Also require GOMP_taskloop to reduce to linear (taskdata->td_flags.native)
4213  if (if_val == 0) { // if(0) specified, mark task as serial
4214  taskdata->td_flags.task_serial = 1;
4215  taskdata->td_flags.tiedness = TASK_TIED; // AC: serial task cannot be untied
4216  // always start serial tasks linearly
4217  __kmp_taskloop_linear(loc, gtid, task, lb, ub, st, ub_glob, num_tasks,
4218  grainsize, extras, tc,
4219 #if OMPT_SUPPORT
4220  OMPT_GET_RETURN_ADDRESS(0),
4221 #endif
4222  task_dup);
4223  // !taskdata->td_flags.native => currently force linear spawning of tasks
4224  // for GOMP_taskloop
4225  } else if (num_tasks > num_tasks_min && !taskdata->td_flags.native) {
4226  KA_TRACE(20, ("__kmpc_taskloop: T#%d, go recursive: tc %llu, #tasks %llu"
4227  "(%lld), grain %llu, extras %llu\n",
4228  gtid, tc, num_tasks, num_tasks_min, grainsize, extras));
4229  __kmp_taskloop_recur(loc, gtid, task, lb, ub, st, ub_glob, num_tasks,
4230  grainsize, extras, tc, num_tasks_min,
4231 #if OMPT_SUPPORT
4232  OMPT_GET_RETURN_ADDRESS(0),
4233 #endif
4234  task_dup);
4235  } else {
4236  KA_TRACE(20, ("__kmpc_taskloop: T#%d, go linear: tc %llu, #tasks %llu"
4237  "(%lld), grain %llu, extras %llu\n",
4238  gtid, tc, num_tasks, num_tasks_min, grainsize, extras));
4239  __kmp_taskloop_linear(loc, gtid, task, lb, ub, st, ub_glob, num_tasks,
4240  grainsize, extras, tc,
4241 #if OMPT_SUPPORT
4242  OMPT_GET_RETURN_ADDRESS(0),
4243 #endif
4244  task_dup);
4245  }
4246 
4247 #if OMPT_SUPPORT && OMPT_OPTIONAL
4248  if (ompt_enabled.ompt_callback_work) {
4249  ompt_callbacks.ompt_callback(ompt_callback_work)(
4250  ompt_work_taskloop, ompt_scope_end, &(team_info->parallel_data),
4251  &(task_info->task_data), tc, OMPT_GET_RETURN_ADDRESS(0));
4252  }
4253 #endif
4254 
4255  if (nogroup == 0) {
4256 #if OMPT_SUPPORT && OMPT_OPTIONAL
4257  OMPT_STORE_RETURN_ADDRESS(gtid);
4258 #endif
4259  __kmpc_end_taskgroup(loc, gtid);
4260  }
4261  KA_TRACE(20, ("__kmpc_taskloop(exit): T#%d\n", gtid));
4262 }
4263 
4264 #endif