17#include "kmp_config.h"
27#ifndef KMP_STATIC_STEAL_ENABLED
28#define KMP_STATIC_STEAL_ENABLED 1
30#define KMP_WEIGHTED_ITERATIONS_SUPPORTED \
31 (KMP_AFFINITY_SUPPORTED && KMP_STATIC_STEAL_ENABLED && \
32 (KMP_ARCH_X86 || KMP_ARCH_X86_64))
34#define TASK_CURRENT_NOT_QUEUED 0
35#define TASK_CURRENT_QUEUED 1
37#ifdef BUILD_TIED_TASK_STACK
38#define TASK_STACK_EMPTY 0
39#define TASK_STACK_BLOCK_BITS 5
41#define TASK_STACK_BLOCK_SIZE (1 << TASK_STACK_BLOCK_BITS)
43#define TASK_STACK_INDEX_MASK (TASK_STACK_BLOCK_SIZE - 1)
46#define TASK_NOT_PUSHED 1
47#define TASK_SUCCESSFULLY_PUSHED 0
50#define TASK_EXPLICIT 1
51#define TASK_IMPLICIT 0
54#define TASK_DETACHABLE 1
55#define TASK_UNDETACHABLE 0
57#define KMP_CANCEL_THREADS
58#define KMP_THREAD_ATTR
62#if defined(__ANDROID__)
63#undef KMP_CANCEL_THREADS
69#undef KMP_CANCEL_THREADS
94#include "kmp_safe_c_api.h"
100#if KMP_USE_HIER_SCHED
102#undef KMP_USE_HIER_SCHED
103#define KMP_USE_HIER_SCHED KMP_AFFINITY_SUPPORTED
107#if KMP_USE_HWLOC && KMP_AFFINITY_SUPPORTED && !defined(OMPD_SKIP_HWLOC)
109#define KMP_HWLOC_ENABLED 1
110#ifndef HWLOC_OBJ_NUMANODE
111#define HWLOC_OBJ_NUMANODE HWLOC_OBJ_NODE
113#ifndef HWLOC_OBJ_PACKAGE
114#define HWLOC_OBJ_PACKAGE HWLOC_OBJ_SOCKET
117#define KMP_HWLOC_ENABLED 0
120#if KMP_ARCH_X86 || KMP_ARCH_X86_64
121#include <xmmintrin.h>
125#define KMP_INTERNAL_MALLOC(sz) malloc(sz)
126#define KMP_INTERNAL_FREE(p) free(p)
127#define KMP_INTERNAL_REALLOC(p, sz) realloc((p), (sz))
128#define KMP_INTERNAL_CALLOC(n, sz) calloc((n), (sz))
130#include "kmp_debug.h"
132#include "kmp_version.h"
133#include "kmp_barrier.h"
135#include "kmp_debugger.h"
139#define KMP_HANDLE_SIGNALS ((KMP_OS_UNIX && !KMP_OS_WASI) || KMP_OS_WINDOWS)
141#include "kmp_wrapper_malloc.h"
144#if !defined NSIG && defined _NSIG
150#pragma weak clock_gettime
154#include "ompt-internal.h"
158#include "ompd-specific.h"
162#define UNLIKELY(x) (x)
171#ifndef USE_FAST_MEMORY
172#define USE_FAST_MEMORY 3
175#ifndef KMP_NESTED_HOT_TEAMS
176#define KMP_NESTED_HOT_TEAMS 0
177#define USE_NESTED_HOT_ARG(x)
179#if KMP_NESTED_HOT_TEAMS
180#define USE_NESTED_HOT_ARG(x) , x
182#define USE_NESTED_HOT_ARG(x)
187#ifndef USE_CMP_XCHG_FOR_BGET
188#define USE_CMP_XCHG_FOR_BGET 1
196#define KMP_NSEC_PER_SEC 1000000000L
197#define KMP_USEC_PER_SEC 1000000L
198#define KMP_NSEC_PER_USEC 1000L
222 KMP_IDENT_BARRIER_IMPL_MASK = 0x01C0,
223 KMP_IDENT_BARRIER_IMPL_FOR = 0x0040,
224 KMP_IDENT_BARRIER_IMPL_SECTIONS = 0x00C0,
226 KMP_IDENT_BARRIER_IMPL_SINGLE = 0x0140,
227 KMP_IDENT_BARRIER_IMPL_WORKSHARE = 0x01C0,
240 KMP_IDENT_ATOMIC_HINT_UNCONTENDED = 0x010000,
241 KMP_IDENT_ATOMIC_HINT_CONTENDED = 0x020000,
242 KMP_IDENT_ATOMIC_HINT_NONSPECULATIVE = 0x040000,
243 KMP_IDENT_ATOMIC_HINT_SPECULATIVE = 0x080000,
244 KMP_IDENT_OPENMP_SPEC_VERSION_MASK = 0xFF000000
265 kmp_int32 get_openmp_version() {
266 return (((
flags & KMP_IDENT_OPENMP_SPEC_VERSION_MASK) >> 24) & 0xFF);
274typedef union kmp_team kmp_team_t;
275typedef struct kmp_taskdata kmp_taskdata_t;
276typedef union kmp_task_team kmp_task_team_t;
277typedef union kmp_team kmp_team_p;
278typedef union kmp_info kmp_info_p;
279typedef union kmp_root kmp_root_p;
281template <
bool C = false,
bool S = true>
class kmp_flag_32;
282template <
bool C = false,
bool S = true>
class kmp_flag_64;
283template <
bool C = false,
bool S = true>
class kmp_atomic_flag_64;
284class kmp_flag_oncore;
294#define KMP_PACK_64(HIGH_32, LOW_32) \
295 ((kmp_int64)((((kmp_uint64)(HIGH_32)) << 32) | (kmp_uint64)(LOW_32)))
300 while (*(_x) == ' ' || *(_x) == '\t') \
303#define SKIP_DIGITS(_x) \
305 while (*(_x) >= '0' && *(_x) <= '9') \
308#define SKIP_TOKEN(_x) \
310 while ((*(_x) >= '0' && *(_x) <= '9') || (*(_x) >= 'a' && *(_x) <= 'z') || \
311 (*(_x) >= 'A' && *(_x) <= 'Z') || *(_x) == '_') \
314#define SKIP_TO(_x, _c) \
316 while (*(_x) != '\0' && *(_x) != (_c)) \
322#define KMP_MAX(x, y) ((x) > (y) ? (x) : (y))
323#define KMP_MIN(x, y) ((x) < (y) ? (x) : (y))
328enum kmp_state_timer {
338#ifdef USE_LOAD_BALANCE
339 dynamic_load_balance,
342 dynamic_thread_limit,
348#ifndef KMP_SCHED_TYPE_DEFINED
349#define KMP_SCHED_TYPE_DEFINED
350typedef enum kmp_sched {
353 kmp_sched_static = 1,
354 kmp_sched_dynamic = 2,
355 kmp_sched_guided = 3,
357 kmp_sched_upper_std = 5,
358 kmp_sched_lower_ext = 100,
359 kmp_sched_trapezoidal = 101,
360#if KMP_STATIC_STEAL_ENABLED
361 kmp_sched_static_steal = 102,
364 kmp_sched_default = kmp_sched_static,
365 kmp_sched_monotonic = 0x80000000
375 kmp_sch_static_chunked = 33,
377 kmp_sch_dynamic_chunked = 35,
379 kmp_sch_runtime = 37,
381 kmp_sch_trapezoidal = 39,
384 kmp_sch_static_greedy = 40,
385 kmp_sch_static_balanced = 41,
387 kmp_sch_guided_iterative_chunked = 42,
388 kmp_sch_guided_analytical_chunked = 43,
390 kmp_sch_static_steal = 44,
393 kmp_sch_static_balanced_chunked = 45,
401 kmp_ord_static_chunked = 65,
403 kmp_ord_dynamic_chunked = 67,
404 kmp_ord_guided_chunked = 68,
405 kmp_ord_runtime = 69,
407 kmp_ord_trapezoidal = 71,
420 kmp_nm_static_chunked =
423 kmp_nm_dynamic_chunked = 163,
425 kmp_nm_runtime = 165,
427 kmp_nm_trapezoidal = 167,
430 kmp_nm_static_greedy = 168,
431 kmp_nm_static_balanced = 169,
433 kmp_nm_guided_iterative_chunked = 170,
434 kmp_nm_guided_analytical_chunked = 171,
435 kmp_nm_static_steal =
438 kmp_nm_ord_static_chunked = 193,
440 kmp_nm_ord_dynamic_chunked = 195,
441 kmp_nm_ord_guided_chunked = 196,
442 kmp_nm_ord_runtime = 197,
444 kmp_nm_ord_trapezoidal = 199,
466#define SCHEDULE_WITHOUT_MODIFIERS(s) \
469#define SCHEDULE_HAS_MONOTONIC(s) (((s)&kmp_sch_modifier_monotonic) != 0)
470#define SCHEDULE_HAS_NONMONOTONIC(s) (((s)&kmp_sch_modifier_nonmonotonic) != 0)
471#define SCHEDULE_HAS_NO_MODIFIERS(s) \
472 (((s) & (kmp_sch_modifier_nonmonotonic | kmp_sch_modifier_monotonic)) == 0)
473#define SCHEDULE_GET_MODIFIERS(s) \
474 ((enum sched_type)( \
475 (s) & (kmp_sch_modifier_nonmonotonic | kmp_sch_modifier_monotonic)))
476#define SCHEDULE_SET_MODIFIERS(s, m) \
477 (s = (enum sched_type)((kmp_int32)s | (kmp_int32)m))
478#define SCHEDULE_NONMONOTONIC 0
479#define SCHEDULE_MONOTONIC 1
486__kmp_sched_apply_mods_stdkind(kmp_sched_t *kind,
488 if (SCHEDULE_HAS_MONOTONIC(internal_kind)) {
489 *kind = (kmp_sched_t)((
int)*kind | (
int)kmp_sched_monotonic);
495__kmp_sched_apply_mods_intkind(kmp_sched_t kind,
497 if ((
int)kind & (
int)kmp_sched_monotonic) {
498 *internal_kind = (
enum sched_type)((
int)*internal_kind |
504static inline kmp_sched_t __kmp_sched_without_mods(kmp_sched_t kind) {
505 return (kmp_sched_t)((int)kind & ~((int)kmp_sched_monotonic));
509typedef union kmp_r_sched {
528enum mic_type { non_mic, mic1, mic2, mic3, dummy };
532typedef struct kmp_nested_nthreads_t {
536} kmp_nested_nthreads_t;
538extern kmp_nested_nthreads_t __kmp_nested_nth;
542#undef KMP_FAST_REDUCTION_BARRIER
543#define KMP_FAST_REDUCTION_BARRIER 1
545#undef KMP_FAST_REDUCTION_CORE_DUO
546#if KMP_ARCH_X86 || KMP_ARCH_X86_64
547#define KMP_FAST_REDUCTION_CORE_DUO 1
550enum _reduction_method {
551 reduction_method_not_defined = 0,
552 critical_reduce_block = (1 << 8),
553 atomic_reduce_block = (2 << 8),
554 tree_reduce_block = (3 << 8),
555 empty_reduce_block = (4 << 8)
570#if KMP_FAST_REDUCTION_BARRIER
571#define PACK_REDUCTION_METHOD_AND_BARRIER(reduction_method, barrier_type) \
572 ((reduction_method) | (barrier_type))
574#define UNPACK_REDUCTION_METHOD(packed_reduction_method) \
575 ((enum _reduction_method)((packed_reduction_method) & (0x0000FF00)))
577#define UNPACK_REDUCTION_BARRIER(packed_reduction_method) \
578 ((enum barrier_type)((packed_reduction_method) & (0x000000FF)))
580#define PACK_REDUCTION_METHOD_AND_BARRIER(reduction_method, barrier_type) \
583#define UNPACK_REDUCTION_METHOD(packed_reduction_method) \
584 (packed_reduction_method)
586#define UNPACK_REDUCTION_BARRIER(packed_reduction_method) (bs_plain_barrier)
589#define TEST_REDUCTION_METHOD(packed_reduction_method, which_reduction_block) \
590 ((UNPACK_REDUCTION_METHOD(packed_reduction_method)) == \
591 (which_reduction_block))
593#if KMP_FAST_REDUCTION_BARRIER
594#define TREE_REDUCE_BLOCK_WITH_REDUCTION_BARRIER \
595 (PACK_REDUCTION_METHOD_AND_BARRIER(tree_reduce_block, bs_reduction_barrier))
597#define TREE_REDUCE_BLOCK_WITH_PLAIN_BARRIER \
598 (PACK_REDUCTION_METHOD_AND_BARRIER(tree_reduce_block, bs_plain_barrier))
601typedef int PACKED_REDUCTION_METHOD_T;
609#pragma warning(disable : 271 310)
641typedef enum kmp_hw_core_type_t {
642 KMP_HW_CORE_TYPE_UNKNOWN = 0x0,
643#if KMP_ARCH_X86 || KMP_ARCH_X86_64
644 KMP_HW_CORE_TYPE_ATOM = 0x20,
645 KMP_HW_CORE_TYPE_CORE = 0x40,
646 KMP_HW_MAX_NUM_CORE_TYPES = 3,
648 KMP_HW_MAX_NUM_CORE_TYPES = 1,
652#define KMP_HW_MAX_NUM_CORE_EFFS 8
654#define KMP_DEBUG_ASSERT_VALID_HW_TYPE(type) \
655 KMP_DEBUG_ASSERT(type >= (kmp_hw_t)0 && type < KMP_HW_LAST)
656#define KMP_ASSERT_VALID_HW_TYPE(type) \
657 KMP_ASSERT(type >= (kmp_hw_t)0 && type < KMP_HW_LAST)
659#define KMP_FOREACH_HW_TYPE(type) \
660 for (kmp_hw_t type = (kmp_hw_t)0; type < KMP_HW_LAST; \
661 type = (kmp_hw_t)((int)type + 1))
663const char *__kmp_hw_get_keyword(kmp_hw_t type,
bool plural =
false);
664const char *__kmp_hw_get_catalog_string(kmp_hw_t type,
bool plural =
false);
665const char *__kmp_hw_get_core_type_string(kmp_hw_core_type_t type);
668#if KMP_AFFINITY_SUPPORTED
672#if _MSC_VER < 1600 && KMP_MSVC_COMPAT
673typedef struct GROUP_AFFINITY {
679#if KMP_GROUP_AFFINITY
680extern int __kmp_num_proc_groups;
682static const int __kmp_num_proc_groups = 1;
684typedef DWORD (*kmp_GetActiveProcessorCount_t)(WORD);
685extern kmp_GetActiveProcessorCount_t __kmp_GetActiveProcessorCount;
687typedef WORD (*kmp_GetActiveProcessorGroupCount_t)(void);
688extern kmp_GetActiveProcessorGroupCount_t __kmp_GetActiveProcessorGroupCount;
690typedef BOOL (*kmp_GetThreadGroupAffinity_t)(HANDLE, GROUP_AFFINITY *);
691extern kmp_GetThreadGroupAffinity_t __kmp_GetThreadGroupAffinity;
693typedef BOOL (*kmp_SetThreadGroupAffinity_t)(HANDLE,
const GROUP_AFFINITY *,
695extern kmp_SetThreadGroupAffinity_t __kmp_SetThreadGroupAffinity;
699extern hwloc_topology_t __kmp_hwloc_topology;
700extern int __kmp_hwloc_error;
703extern size_t __kmp_affin_mask_size;
704#define KMP_AFFINITY_CAPABLE() (__kmp_affin_mask_size > 0)
705#define KMP_AFFINITY_DISABLE() (__kmp_affin_mask_size = 0)
706#define KMP_AFFINITY_ENABLE(mask_size) (__kmp_affin_mask_size = mask_size)
707#define KMP_CPU_SET_ITERATE(i, mask) \
708 for (i = (mask)->begin(); (int)i != (mask)->end(); i = (mask)->next(i))
709#define KMP_CPU_SET(i, mask) (mask)->set(i)
710#define KMP_CPU_ISSET(i, mask) (mask)->is_set(i)
711#define KMP_CPU_CLR(i, mask) (mask)->clear(i)
712#define KMP_CPU_ZERO(mask) (mask)->zero()
713#define KMP_CPU_ISEMPTY(mask) (mask)->empty()
714#define KMP_CPU_COPY(dest, src) (dest)->copy(src)
715#define KMP_CPU_AND(dest, src) (dest)->bitwise_and(src)
716#define KMP_CPU_COMPLEMENT(max_bit_number, mask) (mask)->bitwise_not()
717#define KMP_CPU_UNION(dest, src) (dest)->bitwise_or(src)
718#define KMP_CPU_EQUAL(dest, src) (dest)->is_equal(src)
719#define KMP_CPU_ALLOC(ptr) (ptr = __kmp_affinity_dispatch->allocate_mask())
720#define KMP_CPU_FREE(ptr) __kmp_affinity_dispatch->deallocate_mask(ptr)
721#define KMP_CPU_ALLOC_ON_STACK(ptr) KMP_CPU_ALLOC(ptr)
722#define KMP_CPU_FREE_FROM_STACK(ptr) KMP_CPU_FREE(ptr)
723#define KMP_CPU_INTERNAL_ALLOC(ptr) KMP_CPU_ALLOC(ptr)
724#define KMP_CPU_INTERNAL_FREE(ptr) KMP_CPU_FREE(ptr)
725#define KMP_CPU_INDEX(arr, i) __kmp_affinity_dispatch->index_mask_array(arr, i)
726#define KMP_CPU_ALLOC_ARRAY(arr, n) \
727 (arr = __kmp_affinity_dispatch->allocate_mask_array(n))
728#define KMP_CPU_FREE_ARRAY(arr, n) \
729 __kmp_affinity_dispatch->deallocate_mask_array(arr)
730#define KMP_CPU_INTERNAL_ALLOC_ARRAY(arr, n) KMP_CPU_ALLOC_ARRAY(arr, n)
731#define KMP_CPU_INTERNAL_FREE_ARRAY(arr, n) KMP_CPU_FREE_ARRAY(arr, n)
732#define __kmp_get_system_affinity(mask, abort_bool) \
733 (mask)->get_system_affinity(abort_bool)
734#define __kmp_set_system_affinity(mask, abort_bool) \
735 (mask)->set_system_affinity(abort_bool)
736#define __kmp_get_proc_group(mask) (mask)->get_proc_group()
742 void *
operator new(
size_t n);
743 void operator delete(
void *p);
744 void *
operator new[](
size_t n);
745 void operator delete[](
void *p);
748 virtual void set(
int i) {}
750 virtual bool is_set(
int i)
const {
return false; }
752 virtual void clear(
int i) {}
754 virtual void zero() {}
756 virtual bool empty()
const {
return true; }
758 virtual void copy(
const Mask *src) {}
760 virtual void bitwise_and(
const Mask *rhs) {}
762 virtual void bitwise_or(
const Mask *rhs) {}
764 virtual void bitwise_not() {}
766 virtual bool is_equal(
const Mask *rhs)
const {
return false; }
769 virtual int begin()
const {
return 0; }
770 virtual int end()
const {
return 0; }
771 virtual int next(
int previous)
const {
return 0; }
773 virtual int set_process_affinity(
bool abort_on_error)
const {
return -1; }
776 virtual int set_system_affinity(
bool abort_on_error)
const {
return -1; }
778 virtual int get_system_affinity(
bool abort_on_error) {
return -1; }
781 virtual int get_proc_group()
const {
return -1; }
782 int get_max_cpu()
const {
785 KMP_CPU_SET_ITERATE(cpu,
this) {
792 void *
operator new(
size_t n);
793 void operator delete(
void *p);
795 virtual ~KMPAffinity() =
default;
797 virtual void determine_capable(
const char *env_var) {}
799 virtual void bind_thread(
int proc) {}
801 virtual Mask *allocate_mask() {
return nullptr; }
802 virtual void deallocate_mask(Mask *m) {}
803 virtual Mask *allocate_mask_array(
int num) {
return nullptr; }
804 virtual void deallocate_mask_array(Mask *m) {}
805 virtual Mask *index_mask_array(Mask *m,
int index) {
return nullptr; }
806 static void pick_api();
807 static void destroy_api();
815 virtual api_type get_api_type()
const {
821 static bool picked_api;
824typedef KMPAffinity::Mask kmp_affin_mask_t;
825extern KMPAffinity *__kmp_affinity_dispatch;
828class kmp_affinity_raii_t {
829 kmp_affin_mask_t *mask;
833 kmp_affinity_raii_t(
const kmp_affin_mask_t *new_mask =
nullptr)
834 : mask(nullptr), restored(false) {
835 if (KMP_AFFINITY_CAPABLE()) {
837 KMP_ASSERT(mask != NULL);
838 __kmp_get_system_affinity(mask,
true);
840 __kmp_set_system_affinity(new_mask,
true);
844 if (mask && KMP_AFFINITY_CAPABLE() && !restored) {
845 __kmp_set_system_affinity(mask,
true);
850 ~kmp_affinity_raii_t() { restore(); }
856#define KMP_AFFIN_MASK_PRINT_LEN 1024
870enum affinity_top_method {
871 affinity_top_method_all = 0,
872#if KMP_ARCH_X86 || KMP_ARCH_X86_64
873 affinity_top_method_apicid,
874 affinity_top_method_x2apicid,
875 affinity_top_method_x2apicid_1f,
877 affinity_top_method_cpuinfo,
878#if KMP_GROUP_AFFINITY
879 affinity_top_method_group,
881 affinity_top_method_flat,
883 affinity_top_method_hwloc,
885 affinity_top_method_default
888#define affinity_respect_mask_default (2)
890typedef struct kmp_affinity_flags_t {
892 unsigned verbose : 1;
893 unsigned warnings : 1;
894 unsigned respect : 2;
896 unsigned initialized : 1;
897 unsigned core_types_gran : 1;
898 unsigned core_effs_gran : 1;
899 unsigned omp_places : 1;
900 unsigned reserved : 22;
901} kmp_affinity_flags_t;
902KMP_BUILD_ASSERT(
sizeof(kmp_affinity_flags_t) == 4);
904typedef struct kmp_affinity_ids_t {
906 int ids[KMP_HW_LAST];
909typedef struct kmp_affinity_attrs_t {
913 unsigned reserved : 15;
914} kmp_affinity_attrs_t;
915#define KMP_AFFINITY_ATTRS_UNKNOWN \
916 { KMP_HW_CORE_TYPE_UNKNOWN, kmp_hw_attr_t::UNKNOWN_CORE_EFF, 0, 0 }
918typedef struct kmp_affinity_t {
920 enum affinity_type type;
923 kmp_affinity_attrs_t core_attr_gran;
926 kmp_affinity_flags_t flags;
928 kmp_affin_mask_t *masks;
929 kmp_affinity_ids_t *ids;
930 kmp_affinity_attrs_t *attrs;
931 unsigned num_os_id_masks;
932 kmp_affin_mask_t *os_id_masks;
936#define KMP_AFFINITY_INIT(env) \
938 nullptr, affinity_default, KMP_HW_UNKNOWN, -1, KMP_AFFINITY_ATTRS_UNKNOWN, \
940 {TRUE, FALSE, TRUE, affinity_respect_mask_default, FALSE, FALSE, \
941 FALSE, FALSE, FALSE}, \
942 0, nullptr, nullptr, nullptr, 0, nullptr, env \
945extern enum affinity_top_method __kmp_affinity_top_method;
946extern kmp_affinity_t __kmp_affinity;
947extern kmp_affinity_t __kmp_hh_affinity;
948extern kmp_affinity_t *__kmp_affinities[2];
950extern void __kmp_affinity_bind_thread(
int which);
952extern kmp_affin_mask_t *__kmp_affin_fullMask;
953extern kmp_affin_mask_t *__kmp_affin_origMask;
954extern char *__kmp_cpuinfo_file;
956#if KMP_WEIGHTED_ITERATIONS_SUPPORTED
957extern int __kmp_first_osid_with_ecore;
963typedef enum kmp_proc_bind_t {
973typedef struct kmp_nested_proc_bind_t {
974 kmp_proc_bind_t *bind_types;
977} kmp_nested_proc_bind_t;
979extern kmp_nested_proc_bind_t __kmp_nested_proc_bind;
980extern kmp_proc_bind_t __kmp_teams_proc_bind;
982extern int __kmp_display_affinity;
983extern char *__kmp_affinity_format;
984static const size_t KMP_AFFINITY_FORMAT_SIZE = 512;
986extern int __kmp_tool;
987extern char *__kmp_tool_libraries;
990#if KMP_AFFINITY_SUPPORTED
991#define KMP_PLACE_ALL (-1)
992#define KMP_PLACE_UNDEFINED (-2)
994#define KMP_AFFINITY_NON_PROC_BIND \
995 ((__kmp_nested_proc_bind.bind_types[0] == proc_bind_false || \
996 __kmp_nested_proc_bind.bind_types[0] == proc_bind_intel) && \
997 (__kmp_affinity.num_masks > 0 || __kmp_affinity.type == affinity_balanced))
1000extern int __kmp_affinity_num_places;
1002typedef enum kmp_cancel_kind_t {
1004 cancel_parallel = 1,
1006 cancel_sections = 3,
1007 cancel_taskgroup = 4
1011typedef struct kmp_hws_item {
1016extern kmp_hws_item_t __kmp_hws_socket;
1017extern kmp_hws_item_t __kmp_hws_die;
1018extern kmp_hws_item_t __kmp_hws_node;
1019extern kmp_hws_item_t __kmp_hws_tile;
1020extern kmp_hws_item_t __kmp_hws_core;
1021extern kmp_hws_item_t __kmp_hws_proc;
1022extern int __kmp_hws_requested;
1023extern int __kmp_hws_abs_flag;
1027#define KMP_PAD(type, sz) \
1028 (sizeof(type) + (sz - ((sizeof(type) - 1) % (sz)) - 1))
1032#define KMP_GTID_DNE (-2)
1033#define KMP_GTID_SHUTDOWN (-3)
1034#define KMP_GTID_MONITOR (-4)
1035#define KMP_GTID_UNKNOWN (-5)
1036#define KMP_GTID_MIN (-6)
1042typedef uintptr_t omp_uintptr_t;
1045 omp_atk_sync_hint = 1,
1046 omp_atk_alignment = 2,
1048 omp_atk_pool_size = 4,
1049 omp_atk_fallback = 5,
1050 omp_atk_fb_data = 6,
1052 omp_atk_partition = 8,
1053 omp_atk_pin_device = 9,
1054 omp_atk_preferred_device = 10,
1055 omp_atk_device_access = 11,
1056 omp_atk_target_access = 12,
1057 omp_atk_atomic_scope = 13,
1058 omp_atk_part_size = 14
1059} omp_alloctrait_key_t;
1064 omp_atv_contended = 3,
1065 omp_atv_uncontended = 4,
1066 omp_atv_serialized = 5,
1067 omp_atv_sequential = omp_atv_serialized,
1068 omp_atv_private = 6,
1072 omp_atv_cgroup = 10,
1073 omp_atv_default_mem_fb = 11,
1074 omp_atv_null_fb = 12,
1075 omp_atv_abort_fb = 13,
1076 omp_atv_allocator_fb = 14,
1077 omp_atv_environment = 15,
1078 omp_atv_nearest = 16,
1079 omp_atv_blocked = 17,
1080 omp_atv_interleaved = 18,
1082 omp_atv_single = 20,
1083 omp_atv_multiple = 21,
1084 omp_atv_memspace = 22
1085} omp_alloctrait_value_t;
1086#define omp_atv_default ((omp_uintptr_t)-1)
1088typedef void *omp_memspace_handle_t;
1089extern omp_memspace_handle_t
const omp_null_mem_space;
1090extern omp_memspace_handle_t
const omp_default_mem_space;
1091extern omp_memspace_handle_t
const omp_large_cap_mem_space;
1092extern omp_memspace_handle_t
const omp_const_mem_space;
1093extern omp_memspace_handle_t
const omp_high_bw_mem_space;
1094extern omp_memspace_handle_t
const omp_low_lat_mem_space;
1095extern omp_memspace_handle_t
const llvm_omp_target_host_mem_space;
1096extern omp_memspace_handle_t
const llvm_omp_target_shared_mem_space;
1097extern omp_memspace_handle_t
const llvm_omp_target_device_mem_space;
1098extern omp_memspace_handle_t
const kmp_max_mem_space;
1101 omp_alloctrait_key_t key;
1102 omp_uintptr_t value;
1105typedef void *omp_allocator_handle_t;
1106extern omp_allocator_handle_t
const omp_null_allocator;
1107extern omp_allocator_handle_t
const omp_default_mem_alloc;
1108extern omp_allocator_handle_t
const omp_large_cap_mem_alloc;
1109extern omp_allocator_handle_t
const omp_const_mem_alloc;
1110extern omp_allocator_handle_t
const omp_high_bw_mem_alloc;
1111extern omp_allocator_handle_t
const omp_low_lat_mem_alloc;
1112extern omp_allocator_handle_t
const omp_cgroup_mem_alloc;
1113extern omp_allocator_handle_t
const omp_pteam_mem_alloc;
1114extern omp_allocator_handle_t
const omp_thread_mem_alloc;
1115extern omp_allocator_handle_t
const llvm_omp_target_host_mem_alloc;
1116extern omp_allocator_handle_t
const llvm_omp_target_shared_mem_alloc;
1117extern omp_allocator_handle_t
const llvm_omp_target_device_mem_alloc;
1118extern omp_allocator_handle_t
const kmp_max_mem_alloc;
1119extern omp_allocator_handle_t __kmp_def_allocator;
1124extern int __kmp_memkind_available;
1125extern bool __kmp_hwloc_available;
1129 omp_memspace_handle_t memspace;
1130 int num_resources = 0;
1131 int *resources =
nullptr;
1137 omp_memspace_handle_t memspace;
1140 omp_alloctrait_value_t fb;
1142 kmp_uint64 pool_size;
1143 kmp_uint64 pool_used;
1145 omp_alloctrait_value_t partition;
1147 int preferred_device;
1148 omp_alloctrait_value_t target_access;
1149 omp_alloctrait_value_t atomic_scope;
1151#if KMP_HWLOC_ENABLED
1152 omp_alloctrait_value_t membind;
1156extern omp_allocator_handle_t __kmpc_init_allocator(
int gtid,
1157 omp_memspace_handle_t,
1159 omp_alloctrait_t traits[]);
1160extern void __kmpc_destroy_allocator(
int gtid, omp_allocator_handle_t al);
1161extern void __kmpc_set_default_allocator(
int gtid, omp_allocator_handle_t al);
1162extern omp_allocator_handle_t __kmpc_get_default_allocator(
int gtid);
1164extern void *__kmpc_alloc(
int gtid,
size_t sz, omp_allocator_handle_t al);
1165extern void *__kmpc_aligned_alloc(
int gtid,
size_t align,
size_t sz,
1166 omp_allocator_handle_t al);
1167extern void *__kmpc_calloc(
int gtid,
size_t nmemb,
size_t sz,
1168 omp_allocator_handle_t al);
1169extern void *__kmpc_realloc(
int gtid,
void *ptr,
size_t sz,
1170 omp_allocator_handle_t al,
1171 omp_allocator_handle_t free_al);
1172extern void __kmpc_free(
int gtid,
void *ptr, omp_allocator_handle_t al);
1174extern void *__kmp_alloc(
int gtid,
size_t align,
size_t sz,
1175 omp_allocator_handle_t al);
1176extern void *__kmp_calloc(
int gtid,
size_t align,
size_t nmemb,
size_t sz,
1177 omp_allocator_handle_t al);
1178extern void *__kmp_realloc(
int gtid,
void *ptr,
size_t sz,
1179 omp_allocator_handle_t al,
1180 omp_allocator_handle_t free_al);
1181extern void ___kmpc_free(
int gtid,
void *ptr, omp_allocator_handle_t al);
1183extern void __kmp_init_memkind();
1184extern void __kmp_fini_memkind();
1185extern void __kmp_init_target_mem();
1186extern void __kmp_fini_target_mem();
1189extern omp_memspace_handle_t __kmp_get_devices_memspace(
int ndevs,
1191 omp_memspace_handle_t,
1193extern omp_allocator_handle_t __kmp_get_devices_allocator(
int ndevs,
1195 omp_memspace_handle_t,
1197extern int __kmp_get_memspace_num_resources(omp_memspace_handle_t memspace);
1198extern omp_memspace_handle_t
1199__kmp_get_submemspace(omp_memspace_handle_t memspace,
int num_resources,
1204#if ENABLE_LIBOMPTARGET
1205extern void __kmp_init_target_task();
1210#define KMP_UINT64_MAX \
1211 (~((kmp_uint64)1 << ((sizeof(kmp_uint64) * (1 << 3)) - 1)))
1213#define KMP_MIN_NTH 1
1216#if defined(PTHREAD_THREADS_MAX) && PTHREAD_THREADS_MAX < INT_MAX
1217#define KMP_MAX_NTH PTHREAD_THREADS_MAX
1223#define KMP_MAX_NTH 64
1225#define KMP_MAX_NTH INT_MAX
1230#ifdef PTHREAD_STACK_MIN
1231#define KMP_MIN_STKSIZE ((size_t)PTHREAD_STACK_MIN)
1233#define KMP_MIN_STKSIZE ((size_t)(32 * 1024))
1236#if KMP_OS_AIX && KMP_ARCH_PPC
1237#define KMP_MAX_STKSIZE 0x10000000
1239#define KMP_MAX_STKSIZE (~((size_t)1 << ((sizeof(size_t) * (1 << 3)) - 1)))
1243#define KMP_DEFAULT_STKSIZE ((size_t)(2 * 1024 * 1024))
1244#elif KMP_ARCH_X86_64
1245#define KMP_DEFAULT_STKSIZE ((size_t)(4 * 1024 * 1024))
1246#define KMP_BACKUP_STKSIZE ((size_t)(2 * 1024 * 1024))
1250#define KMP_DEFAULT_STKSIZE ((size_t)(4 * 1024 * 1024))
1253#define KMP_DEFAULT_STKSIZE ((size_t)(4 * 1024 * 1024))
1255#define KMP_DEFAULT_STKSIZE ((size_t)(1024 * 1024))
1258#define KMP_DEFAULT_MALLOC_POOL_INCR ((size_t)(1024 * 1024))
1259#define KMP_MIN_MALLOC_POOL_INCR ((size_t)(4 * 1024))
1260#define KMP_MAX_MALLOC_POOL_INCR \
1261 (~((size_t)1 << ((sizeof(size_t) * (1 << 3)) - 1)))
1263#define KMP_MIN_STKOFFSET (0)
1264#define KMP_MAX_STKOFFSET KMP_MAX_STKSIZE
1266#define KMP_DEFAULT_STKOFFSET KMP_MIN_STKOFFSET
1268#define KMP_DEFAULT_STKOFFSET CACHE_LINE
1271#define KMP_MIN_STKPADDING (0)
1272#define KMP_MAX_STKPADDING (2 * 1024 * 1024)
1274#define KMP_BLOCKTIME_MULTIPLIER \
1276#define KMP_MIN_BLOCKTIME (0)
1277#define KMP_MAX_BLOCKTIME \
1281#define KMP_DEFAULT_BLOCKTIME (__kmp_is_hybrid_cpu() ? (0) : (200000))
1284#define KMP_DEFAULT_MONITOR_STKSIZE ((size_t)(64 * 1024))
1285#define KMP_MIN_MONITOR_WAKEUPS (1)
1286#define KMP_MAX_MONITOR_WAKEUPS (1000)
1290#define KMP_WAKEUPS_FROM_BLOCKTIME(blocktime, monitor_wakeups) \
1291 (((blocktime) == KMP_MAX_BLOCKTIME) ? (monitor_wakeups) \
1292 : ((blocktime) == KMP_MIN_BLOCKTIME) ? KMP_MAX_MONITOR_WAKEUPS \
1293 : ((monitor_wakeups) > (KMP_BLOCKTIME_MULTIPLIER / (blocktime))) \
1294 ? (monitor_wakeups) \
1295 : (KMP_BLOCKTIME_MULTIPLIER) / (blocktime))
1299#define KMP_INTERVALS_FROM_BLOCKTIME(blocktime, monitor_wakeups) \
1300 (((blocktime) + (KMP_BLOCKTIME_MULTIPLIER / (monitor_wakeups)) - 1) / \
1301 (KMP_BLOCKTIME_MULTIPLIER / (monitor_wakeups)))
1303#define KMP_BLOCKTIME(team, tid) \
1304 (get__bt_set(team, tid) ? get__blocktime(team, tid) : __kmp_dflt_blocktime)
1305#if KMP_OS_UNIX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)
1307extern kmp_uint64 __kmp_ticks_per_msec;
1308extern kmp_uint64 __kmp_ticks_per_usec;
1309#if KMP_COMPILER_ICC || KMP_COMPILER_ICX
1310#define KMP_NOW() ((kmp_uint64)_rdtsc())
1312#define KMP_NOW() __kmp_hardware_timestamp()
1314#define KMP_BLOCKTIME_INTERVAL(team, tid) \
1315 ((kmp_uint64)KMP_BLOCKTIME(team, tid) * __kmp_ticks_per_usec)
1316#define KMP_BLOCKING(goal, count) ((goal) > KMP_NOW())
1319extern kmp_uint64 __kmp_now_nsec();
1320#define KMP_NOW() __kmp_now_nsec()
1321#define KMP_BLOCKTIME_INTERVAL(team, tid) \
1322 ((kmp_uint64)KMP_BLOCKTIME(team, tid) * (kmp_uint64)KMP_NSEC_PER_USEC)
1323#define KMP_BLOCKING(goal, count) ((count) % 1000 != 0 || (goal) > KMP_NOW())
1327#define KMP_MIN_STATSCOLS 40
1328#define KMP_MAX_STATSCOLS 4096
1329#define KMP_DEFAULT_STATSCOLS 80
1331#define KMP_MIN_INTERVAL 0
1332#define KMP_MAX_INTERVAL (INT_MAX - 1)
1333#define KMP_DEFAULT_INTERVAL 0
1335#define KMP_MIN_CHUNK 1
1336#define KMP_MAX_CHUNK (INT_MAX - 1)
1337#define KMP_DEFAULT_CHUNK 1
1339#define KMP_MIN_DISP_NUM_BUFF 1
1340#define KMP_DFLT_DISP_NUM_BUFF 7
1341#define KMP_MAX_DISP_NUM_BUFF 4096
1343#define KMP_MAX_ORDERED 8
1345#define KMP_MAX_FIELDS 32
1347#define KMP_MAX_BRANCH_BITS 31
1349#define KMP_MAX_ACTIVE_LEVELS_LIMIT INT_MAX
1351#define KMP_MAX_DEFAULT_DEVICE_LIMIT INT_MAX
1353#define KMP_MAX_TASK_PRIORITY_LIMIT INT_MAX
1358#if KMP_ARCH_X86 || KMP_ARCH_X86_64
1359#define KMP_TLS_GTID_MIN 5
1361#define KMP_TLS_GTID_MIN INT_MAX
1364#define KMP_MASTER_TID(tid) (0 == (tid))
1365#define KMP_WORKER_TID(tid) (0 != (tid))
1367#define KMP_MASTER_GTID(gtid) (0 == __kmp_tid_from_gtid((gtid)))
1368#define KMP_WORKER_GTID(gtid) (0 != __kmp_tid_from_gtid((gtid)))
1369#define KMP_INITIAL_GTID(gtid) (0 == (gtid))
1373#define TRUE (!FALSE)
1379#define KMP_INIT_WAIT 64U
1380#define KMP_NEXT_WAIT 32U
1382#define KMP_INIT_WAIT 1024U
1383#define KMP_NEXT_WAIT 512U
1386#define KMP_INIT_WAIT 1024U
1387#define KMP_NEXT_WAIT 512U
1388#elif KMP_OS_DRAGONFLY
1390#define KMP_INIT_WAIT 1024U
1391#define KMP_NEXT_WAIT 512U
1394#define KMP_INIT_WAIT 1024U
1395#define KMP_NEXT_WAIT 512U
1398#define KMP_INIT_WAIT 1024U
1399#define KMP_NEXT_WAIT 512U
1402#define KMP_INIT_WAIT 1024U
1403#define KMP_NEXT_WAIT 512U
1406#define KMP_INIT_WAIT 1024U
1407#define KMP_NEXT_WAIT 512U
1410#define KMP_INIT_WAIT 1024U
1411#define KMP_NEXT_WAIT 512U
1414#define KMP_INIT_WAIT 1024U
1415#define KMP_NEXT_WAIT 512U
1418#define KMP_INIT_WAIT 1024U
1419#define KMP_NEXT_WAIT 512U
1422#define KMP_INIT_WAIT 1024U
1423#define KMP_NEXT_WAIT 512U
1426#if KMP_ARCH_X86 || KMP_ARCH_X86_64
1427typedef struct kmp_cpuid {
1434typedef struct kmp_cpuinfo_flags_t {
1437 unsigned hybrid : 1;
1438 unsigned reserved : 29;
1439} kmp_cpuinfo_flags_t;
1441typedef struct kmp_cpuinfo {
1448 kmp_cpuinfo_flags_t flags;
1450 kmp_uint64 frequency;
1451 char name[3 *
sizeof(kmp_cpuid_t)];
1454extern void __kmp_query_cpuid(kmp_cpuinfo_t *p);
1459static inline void __kmp_x86_cpuid(
int leaf,
int subleaf,
struct kmp_cpuid *p) {
1460 __asm__ __volatile__(
"cpuid"
1461 :
"=a"(p->eax),
"=b"(p->ebx),
"=c"(p->ecx),
"=d"(p->edx)
1462 :
"a"(leaf),
"c"(subleaf));
1465static inline void __kmp_load_x87_fpu_control_word(
const kmp_int16 *p) {
1466 __asm__ __volatile__(
"fldcw %0" : :
"m"(*p));
1469static inline void __kmp_store_x87_fpu_control_word(kmp_int16 *p) {
1470 __asm__ __volatile__(
"fstcw %0" :
"=m"(*p));
1472static inline void __kmp_clear_x87_fpu_status_word() {
1475 struct x87_fpu_state {
1484 struct x87_fpu_state fpu_state = {0, 0, 0, 0, 0, 0, 0};
1485 __asm__ __volatile__(
"fstenv %0\n\t"
1486 "andw $0x7f00, %1\n\t"
1488 :
"+m"(fpu_state),
"+m"(fpu_state.sw));
1490 __asm__ __volatile__(
"fnclex");
1494static inline void __kmp_load_mxcsr(
const kmp_uint32 *p) { _mm_setcsr(*p); }
1495static inline void __kmp_store_mxcsr(kmp_uint32 *p) { *p = _mm_getcsr(); }
1497static inline void __kmp_load_mxcsr(
const kmp_uint32 *p) {}
1498static inline void __kmp_store_mxcsr(kmp_uint32 *p) { *p = 0; }
1502extern void __kmp_x86_cpuid(
int mode,
int mode2,
struct kmp_cpuid *p);
1503extern void __kmp_load_x87_fpu_control_word(
const kmp_int16 *p);
1504extern void __kmp_store_x87_fpu_control_word(kmp_int16 *p);
1505extern void __kmp_clear_x87_fpu_status_word();
1506static inline void __kmp_load_mxcsr(
const kmp_uint32 *p) { _mm_setcsr(*p); }
1507static inline void __kmp_store_mxcsr(kmp_uint32 *p) { *p = _mm_getcsr(); }
1510#define KMP_X86_MXCSR_MASK 0xffffffc0
1515#if KMP_HAVE_WAITPKG_INTRINSICS
1516#if KMP_HAVE_IMMINTRIN_H
1517#include <immintrin.h>
1518#elif KMP_HAVE_INTRIN_H
1523KMP_ATTRIBUTE_TARGET_WAITPKG
1524static inline int __kmp_tpause(uint32_t hint, uint64_t counter) {
1525#if !KMP_HAVE_WAITPKG_INTRINSICS
1526 uint32_t timeHi = uint32_t(counter >> 32);
1527 uint32_t timeLo = uint32_t(counter & 0xffffffff);
1529 __asm__
volatile(
"#tpause\n.byte 0x66, 0x0F, 0xAE, 0xF1\n"
1535 :
"a"(timeLo),
"d"(timeHi),
"c"(hint)
1539 return _tpause(hint, counter);
1542KMP_ATTRIBUTE_TARGET_WAITPKG
1543static inline void __kmp_umonitor(
void *cacheline) {
1544#if !KMP_HAVE_WAITPKG_INTRINSICS
1545 __asm__
volatile(
"# umonitor\n.byte 0xF3, 0x0F, 0xAE, 0x01 "
1550 _umonitor(cacheline);
1553KMP_ATTRIBUTE_TARGET_WAITPKG
1554static inline int __kmp_umwait(uint32_t hint, uint64_t counter) {
1555#if !KMP_HAVE_WAITPKG_INTRINSICS
1556 uint32_t timeHi = uint32_t(counter >> 32);
1557 uint32_t timeLo = uint32_t(counter & 0xffffffff);
1559 __asm__
volatile(
"#umwait\n.byte 0xF2, 0x0F, 0xAE, 0xF1\n"
1565 :
"a"(timeLo),
"d"(timeHi),
"c"(hint)
1569 return _umwait(hint, counter);
1574#include <pmmintrin.h>
1579__attribute__((target(
"sse3")))
1582__kmp_mm_monitor(
void *cacheline,
unsigned extensions,
unsigned hints) {
1583 _mm_monitor(cacheline, extensions, hints);
1586__attribute__((target(
"sse3")))
1589__kmp_mm_mwait(
unsigned extensions,
unsigned hints) {
1590 _mm_mwait(extensions, hints);
1595extern void __kmp_x86_pause(
void);
1601static inline void __kmp_x86_pause(
void) { _mm_delay_32(300); }
1603static inline void __kmp_x86_pause(
void) { _mm_pause(); }
1605#define KMP_CPU_PAUSE() __kmp_x86_pause()
1607#define KMP_PPC64_PRI_LOW() __asm__ volatile("or 1, 1, 1")
1608#define KMP_PPC64_PRI_MED() __asm__ volatile("or 2, 2, 2")
1609#define KMP_PPC64_PRI_LOC_MB() __asm__ volatile("" : : : "memory")
1610#define KMP_CPU_PAUSE() \
1612 KMP_PPC64_PRI_LOW(); \
1613 KMP_PPC64_PRI_MED(); \
1614 KMP_PPC64_PRI_LOC_MB(); \
1617#define KMP_CPU_PAUSE()
1620#define KMP_INIT_YIELD(count) \
1621 { (count) = __kmp_yield_init; }
1623#define KMP_INIT_BACKOFF(time) \
1624 { (time) = __kmp_pause_init; }
1626#define KMP_OVERSUBSCRIBED \
1627 (TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc))
1629#define KMP_TRY_YIELD \
1630 ((__kmp_use_yield == 1) || (__kmp_use_yield == 2 && (KMP_OVERSUBSCRIBED)))
1632#define KMP_TRY_YIELD_OVERSUB \
1633 ((__kmp_use_yield == 1 || __kmp_use_yield == 2) && (KMP_OVERSUBSCRIBED))
1635#define KMP_YIELD(cond) \
1638 if ((cond) && (KMP_TRY_YIELD)) \
1642#define KMP_YIELD_OVERSUB() \
1645 if ((KMP_TRY_YIELD_OVERSUB)) \
1651#define KMP_YIELD_SPIN(count) \
1654 if (KMP_TRY_YIELD) { \
1658 (count) = __kmp_yield_next; \
1669#define KMP_TPAUSE_MAX_MASK ((kmp_uint64)0xFFFF)
1670#define KMP_YIELD_OVERSUB_ELSE_SPIN(count, time) \
1672 if (__kmp_tpause_enabled) { \
1673 if (KMP_OVERSUBSCRIBED) { \
1674 __kmp_tpause(0, (time)); \
1676 __kmp_tpause(__kmp_tpause_hint, (time)); \
1678 (time) = (time << 1 | 1) & KMP_TPAUSE_MAX_MASK; \
1681 if ((KMP_TRY_YIELD_OVERSUB)) { \
1683 } else if (__kmp_use_yield == 1) { \
1687 (count) = __kmp_yield_next; \
1693#define KMP_YIELD_OVERSUB_ELSE_SPIN(count, time) \
1696 if ((KMP_TRY_YIELD_OVERSUB)) \
1698 else if (__kmp_use_yield == 1) { \
1702 (count) = __kmp_yield_next; \
1722 ct_ordered_in_parallel,
1730#define IS_CONS_TYPE_ORDERED(ct) ((ct) == ct_pdo_ordered)
1734 enum cons_type type;
1741 int p_top, w_top, s_top;
1742 int stack_size, stack_top;
1743 struct cons_data *stack_data;
1746struct kmp_region_info {
1748 int offset[KMP_MAX_FIELDS];
1749 int length[KMP_MAX_FIELDS];
1756typedef HANDLE kmp_thread_t;
1757typedef DWORD kmp_key_t;
1761typedef pthread_t kmp_thread_t;
1762typedef pthread_key_t kmp_key_t;
1765extern kmp_key_t __kmp_gtid_threadprivate_key;
1767typedef struct kmp_sys_info {
1781typedef int kmp_itt_mark_t;
1782#define KMP_ITT_DEBUG 0
1785typedef kmp_int32 kmp_critical_name[8];
1796typedef void (*
kmpc_micro)(kmp_int32 *global_tid, kmp_int32 *bound_tid, ...);
1797typedef void (*kmpc_micro_bound)(kmp_int32 *bound_tid, kmp_int32 *bound_nth,
1814typedef void *(*kmpc_ctor)(
void *);
1827typedef void *(*kmpc_cctor)(
void *,
void *);
1837typedef void *(*kmpc_ctor_vec)(
void *, size_t);
1849typedef void *(*kmpc_cctor_vec)(
void *,
void *,
1857typedef struct kmp_cached_addr {
1859 void ***compiler_cache;
1861 struct kmp_cached_addr *next;
1864struct private_data {
1865 struct private_data *next;
1871struct private_common {
1872 struct private_common *next;
1873 struct private_common *link;
1879struct shared_common {
1880 struct shared_common *next;
1881 struct private_data *pod_init;
1901#define KMP_HASH_TABLE_LOG2 9
1902#define KMP_HASH_TABLE_SIZE \
1903 (1 << KMP_HASH_TABLE_LOG2)
1904#define KMP_HASH_SHIFT 3
1905#define KMP_HASH(x) \
1906 ((((kmp_uintptr_t)x) >> KMP_HASH_SHIFT) & (KMP_HASH_TABLE_SIZE - 1))
1908struct common_table {
1909 struct private_common *data[KMP_HASH_TABLE_SIZE];
1912struct shared_table {
1913 struct shared_common *data[KMP_HASH_TABLE_SIZE];
1918#if KMP_USE_HIER_SCHED
1921typedef struct kmp_hier_private_bdata_t {
1922 kmp_int32 num_active;
1924 kmp_uint64 wait_val[2];
1925} kmp_hier_private_bdata_t;
1928typedef struct kmp_sched_flags {
1929 unsigned ordered : 1;
1930 unsigned nomerge : 1;
1931 unsigned contains_last : 1;
1932 unsigned use_hier : 1;
1933 unsigned use_hybrid : 1;
1934 unsigned unused : 27;
1937KMP_BUILD_ASSERT(
sizeof(kmp_sched_flags_t) == 4);
1939#if KMP_STATIC_STEAL_ENABLED
1940typedef struct KMP_ALIGN_CACHE dispatch_private_info32 {
1947 kmp_lock_t *steal_lock;
1949 kmp_uint32 ordered_lower;
1950 kmp_uint32 ordered_upper;
1958 struct KMP_ALIGN(32) {
1965#if KMP_WEIGHTED_ITERATIONS_SUPPORTED
1967 kmp_uint32 num_procs_with_pcore;
1968 kmp_int32 first_thread_with_ecore;
1971 kmp_int32 last_upper;
1973} dispatch_private_info32_t;
1975#if CACHE_LINE <= 128
1976KMP_BUILD_ASSERT(
sizeof(dispatch_private_info32_t) <= 128);
1979typedef struct KMP_ALIGN_CACHE dispatch_private_info64 {
1986 kmp_lock_t *steal_lock;
1988 kmp_uint64 ordered_lower;
1989 kmp_uint64 ordered_upper;
1997 struct KMP_ALIGN(32) {
2004#if KMP_WEIGHTED_ITERATIONS_SUPPORTED
2006 kmp_uint64 num_procs_with_pcore;
2007 kmp_int64 first_thread_with_ecore;
2011 kmp_int64 last_upper;
2013} dispatch_private_info64_t;
2015#if CACHE_LINE <= 128
2016KMP_BUILD_ASSERT(
sizeof(dispatch_private_info64_t) <= 128);
2020typedef struct KMP_ALIGN_CACHE dispatch_private_info32 {
2033 kmp_uint32 ordered_lower;
2034 kmp_uint32 ordered_upper;
2036 kmp_int32 last_upper;
2038} dispatch_private_info32_t;
2040typedef struct KMP_ALIGN_CACHE dispatch_private_info64 {
2054 kmp_uint64 ordered_lower;
2055 kmp_uint64 ordered_upper;
2057 kmp_int64 last_upper;
2059} dispatch_private_info64_t;
2062typedef struct KMP_ALIGN_CACHE dispatch_private_info {
2063 union private_info {
2064 dispatch_private_info32_t p32;
2065 dispatch_private_info64_t p64;
2068 kmp_sched_flags_t flags;
2069 std::atomic<kmp_uint32> steal_flag;
2070 kmp_int32 ordered_bumped;
2072 struct dispatch_private_info *next;
2073 kmp_int32 type_size;
2074#if KMP_USE_HIER_SCHED
2078 enum cons_type pushed_ws;
2079} dispatch_private_info_t;
2081typedef struct dispatch_shared_info32 {
2084 volatile kmp_uint32 iteration;
2085 volatile kmp_int32 num_done;
2086 volatile kmp_uint32 ordered_iteration;
2088 kmp_int32 ordered_dummy[KMP_MAX_ORDERED - 1];
2089} dispatch_shared_info32_t;
2091typedef struct dispatch_shared_info64 {
2094 volatile kmp_uint64 iteration;
2095 volatile kmp_int64 num_done;
2096 volatile kmp_uint64 ordered_iteration;
2098 kmp_int64 ordered_dummy[KMP_MAX_ORDERED - 3];
2099} dispatch_shared_info64_t;
2101typedef struct dispatch_shared_info {
2103 dispatch_shared_info32_t s32;
2104 dispatch_shared_info64_t s64;
2106 volatile kmp_uint32 buffer_index;
2107 volatile kmp_int32 doacross_buf_idx;
2108 volatile kmp_uint32 *doacross_flags;
2109 kmp_int32 doacross_num_done;
2110#if KMP_USE_HIER_SCHED
2113#if KMP_HWLOC_ENABLED
2119} dispatch_shared_info_t;
2121typedef struct kmp_disp {
2123 void (*th_deo_fcn)(
int *gtid,
int *cid,
ident_t *);
2125 void (*th_dxo_fcn)(
int *gtid,
int *cid,
ident_t *);
2127 dispatch_shared_info_t *th_dispatch_sh_current;
2128 dispatch_private_info_t *th_dispatch_pr_current;
2130 dispatch_private_info_t *th_disp_buffer;
2131 kmp_uint32 th_disp_index;
2132 kmp_int32 th_doacross_buf_idx;
2133 volatile kmp_uint32 *th_doacross_flags;
2134 kmp_int64 *th_doacross_info;
2135#if KMP_USE_INTERNODE_ALIGNMENT
2136 char more_padding[INTERNODE_CACHE_LINE];
2144#define KMP_INIT_BARRIER_STATE 0
2145#define KMP_BARRIER_SLEEP_BIT 0
2146#define KMP_BARRIER_UNUSED_BIT 1
2147#define KMP_BARRIER_BUMP_BIT 2
2149#define KMP_BARRIER_SLEEP_STATE (1 << KMP_BARRIER_SLEEP_BIT)
2150#define KMP_BARRIER_UNUSED_STATE (1 << KMP_BARRIER_UNUSED_BIT)
2151#define KMP_BARRIER_STATE_BUMP (1 << KMP_BARRIER_BUMP_BIT)
2153#if (KMP_BARRIER_SLEEP_BIT >= KMP_BARRIER_BUMP_BIT)
2154#error "Barrier sleep bit must be smaller than barrier bump bit"
2156#if (KMP_BARRIER_UNUSED_BIT >= KMP_BARRIER_BUMP_BIT)
2157#error "Barrier unused bit must be smaller than barrier bump bit"
2161#define KMP_BARRIER_NOT_WAITING 0
2162#define KMP_BARRIER_OWN_FLAG \
2164#define KMP_BARRIER_PARENT_FLAG \
2166#define KMP_BARRIER_SWITCH_TO_OWN_FLAG \
2168#define KMP_BARRIER_SWITCHING \
2171#define KMP_NOT_SAFE_TO_REAP \
2173#define KMP_SAFE_TO_REAP 1
2185 bs_plain_barrier = 0,
2187 bs_forkjoin_barrier,
2188#if KMP_FAST_REDUCTION_BARRIER
2189 bs_reduction_barrier,
2195#if !KMP_FAST_REDUCTION_BARRIER
2196#define bs_reduction_barrier bs_plain_barrier
2199typedef enum kmp_bar_pat {
2206 bp_hierarchical_bar = 3,
2211#define KMP_BARRIER_ICV_PUSH 1
2214typedef struct kmp_internal_control {
2215 int serial_nesting_level;
2228 int task_thread_limit;
2229 int max_active_levels;
2232 kmp_proc_bind_t proc_bind;
2233 kmp_int32 default_device;
2234 struct kmp_internal_control *next;
2235} kmp_internal_control_t;
2237static inline void copy_icvs(kmp_internal_control_t *dst,
2238 kmp_internal_control_t *src) {
2243typedef struct KMP_ALIGN_CACHE kmp_bstate {
2248 kmp_internal_control_t th_fixed_icvs;
2251 volatile kmp_uint64 b_go;
2252 KMP_ALIGN_CACHE
volatile kmp_uint64
2254 kmp_uint32 *skip_per_level;
2255 kmp_uint32 my_level;
2256 kmp_int32 parent_tid;
2259 struct kmp_bstate *parent_bar;
2261 kmp_uint64 leaf_state;
2263 kmp_uint8 base_leaf_kids;
2264 kmp_uint8 leaf_kids;
2266 kmp_uint8 wait_flag;
2267 kmp_uint8 use_oncore_barrier;
2272 KMP_ALIGN_CACHE kmp_uint b_worker_arrived;
2276union KMP_ALIGN_CACHE kmp_barrier_union {
2278 char b_pad[KMP_PAD(kmp_bstate_t, CACHE_LINE)];
2282typedef union kmp_barrier_union kmp_balign_t;
2285union KMP_ALIGN_CACHE kmp_barrier_team_union {
2287 char b_pad[CACHE_LINE];
2289 kmp_uint64 b_arrived;
2295 kmp_uint b_master_arrived;
2296 kmp_uint b_team_arrived;
2301typedef union kmp_barrier_team_union kmp_balign_team_t;
2308typedef struct kmp_win32_mutex {
2310 CRITICAL_SECTION cs;
2313typedef struct kmp_win32_cond {
2318 kmp_win32_mutex_t waiters_count_lock_;
2325 int wait_generation_count_;
2334union KMP_ALIGN_CACHE kmp_cond_union {
2336 char c_pad[CACHE_LINE];
2337 pthread_cond_t c_cond;
2340typedef union kmp_cond_union kmp_cond_align_t;
2342union KMP_ALIGN_CACHE kmp_mutex_union {
2344 char m_pad[CACHE_LINE];
2345 pthread_mutex_t m_mutex;
2348typedef union kmp_mutex_union kmp_mutex_align_t;
2352typedef struct kmp_desc_base {
2354 size_t ds_stacksize;
2356 kmp_thread_t ds_thread;
2357 volatile int ds_tid;
2360 volatile int ds_alive;
2377typedef union KMP_ALIGN_CACHE kmp_desc {
2379 char ds_pad[KMP_PAD(kmp_desc_base_t, CACHE_LINE)];
2383typedef struct kmp_local {
2384 volatile int this_construct;
2389#if !USE_CMP_XCHG_FOR_BGET
2390#ifdef USE_QUEUING_LOCK_FOR_BGET
2391 kmp_lock_t bget_lock;
2393 kmp_bootstrap_lock_t bget_lock;
2400 PACKED_REDUCTION_METHOD_T
2401 packed_reduction_method;
2406#define KMP_CHECK_UPDATE(a, b) \
2409#define KMP_CHECK_UPDATE_SYNC(a, b) \
2411 TCW_SYNC_PTR((a), (b))
2413#define get__blocktime(xteam, xtid) \
2414 ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.blocktime)
2415#define get__bt_set(xteam, xtid) \
2416 ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_set)
2418#define get__bt_intervals(xteam, xtid) \
2419 ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_intervals)
2422#define get__dynamic_2(xteam, xtid) \
2423 ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.dynamic)
2424#define get__nproc_2(xteam, xtid) \
2425 ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.nproc)
2426#define get__sched_2(xteam, xtid) \
2427 ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.sched)
2429#define set__blocktime_team(xteam, xtid, xval) \
2430 (((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.blocktime) = \
2434#define set__bt_intervals_team(xteam, xtid, xval) \
2435 (((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_intervals) = \
2439#define set__bt_set_team(xteam, xtid, xval) \
2440 (((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_set) = (xval))
2442#define set__dynamic(xthread, xval) \
2443 (((xthread)->th.th_current_task->td_icvs.dynamic) = (xval))
2444#define get__dynamic(xthread) \
2445 (((xthread)->th.th_current_task->td_icvs.dynamic) ? (FTN_TRUE) : (FTN_FALSE))
2447#define set__nproc(xthread, xval) \
2448 (((xthread)->th.th_current_task->td_icvs.nproc) = (xval))
2450#define set__thread_limit(xthread, xval) \
2451 (((xthread)->th.th_current_task->td_icvs.thread_limit) = (xval))
2453#define set__max_active_levels(xthread, xval) \
2454 (((xthread)->th.th_current_task->td_icvs.max_active_levels) = (xval))
2456#define get__max_active_levels(xthread) \
2457 ((xthread)->th.th_current_task->td_icvs.max_active_levels)
2459#define set__sched(xthread, xval) \
2460 (((xthread)->th.th_current_task->td_icvs.sched) = (xval))
2462#define set__proc_bind(xthread, xval) \
2463 (((xthread)->th.th_current_task->td_icvs.proc_bind) = (xval))
2464#define get__proc_bind(xthread) \
2465 ((xthread)->th.th_current_task->td_icvs.proc_bind)
2469typedef enum kmp_tasking_mode {
2470 tskm_immediate_exec = 0,
2471 tskm_extra_barrier = 1,
2472 tskm_task_teams = 2,
2474} kmp_tasking_mode_t;
2476extern kmp_tasking_mode_t
2478extern int __kmp_task_stealing_constraint;
2479extern int __kmp_enable_task_throttling;
2480extern kmp_int32 __kmp_default_device;
2483extern kmp_int32 __kmp_max_task_priority;
2485extern kmp_uint64 __kmp_taskloop_min_tasks;
2489#define KMP_TASK_TO_TASKDATA(task) (((kmp_taskdata_t *)task) - 1)
2490#define KMP_TASKDATA_TO_TASK(taskdata) (kmp_task_t *)(taskdata + 1)
2494#define KMP_TASKING_ENABLED(task_team) \
2495 (TRUE == TCR_SYNC_4((task_team)->tt.tt_found_tasks))
2503typedef kmp_int32 (*kmp_routine_entry_t)(kmp_int32,
void *);
2505typedef union kmp_cmplrdata {
2516typedef struct kmp_task {
2523 kmp_cmplrdata_t data2;
2532typedef struct kmp_taskgroup {
2533 std::atomic<kmp_int32> count;
2534 std::atomic<kmp_int32>
2536 struct kmp_taskgroup *parent;
2539 kmp_int32 reduce_num_data;
2540 uintptr_t *gomp_data;
2544typedef union kmp_depnode kmp_depnode_t;
2545typedef struct kmp_depnode_list kmp_depnode_list_t;
2546typedef struct kmp_dephash_entry kmp_dephash_entry_t;
2549#define KMP_DEP_IN 0x1
2550#define KMP_DEP_OUT 0x2
2551#define KMP_DEP_INOUT 0x3
2552#define KMP_DEP_MTX 0x4
2553#define KMP_DEP_SET 0x8
2554#define KMP_DEP_ALL 0x80
2557typedef struct kmp_depend_info {
2558 kmp_intptr_t base_addr;
2563#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
2566 unsigned unused : 3;
2576 unsigned unused : 3;
2584struct kmp_depnode_list {
2585 kmp_depnode_t *node;
2586 kmp_depnode_list_t *next;
2590#define MAX_MTX_DEPS 4
2592typedef struct kmp_base_depnode {
2593 kmp_depnode_list_t *successors;
2595 kmp_lock_t *mtx_locks[MAX_MTX_DEPS];
2596 kmp_int32 mtx_num_locks;
2598#if KMP_SUPPORT_GRAPH_OUTPUT
2601 std::atomic<kmp_int32> npredecessors;
2602 std::atomic<kmp_int32> nrefs;
2603} kmp_base_depnode_t;
2605union KMP_ALIGN_CACHE kmp_depnode {
2607 char dn_pad[KMP_PAD(kmp_base_depnode_t, CACHE_LINE)];
2608 kmp_base_depnode_t dn;
2611struct kmp_dephash_entry {
2613 kmp_depnode_t *last_out;
2614 kmp_depnode_list_t *last_set;
2615 kmp_depnode_list_t *prev_set;
2616 kmp_uint8 last_flag;
2617 kmp_lock_t *mtx_lock;
2618 kmp_dephash_entry_t *next_in_bucket;
2621typedef struct kmp_dephash {
2622 kmp_dephash_entry_t **buckets;
2624 kmp_depnode_t *last_all;
2626 kmp_uint32 nelements;
2627 kmp_uint32 nconflicts;
2630typedef struct kmp_task_affinity_info {
2631 kmp_intptr_t base_addr;
2636 kmp_int32 reserved : 30;
2638} kmp_task_affinity_info_t;
2640typedef enum kmp_event_type_t {
2641 KMP_EVENT_UNINITIALIZED = 0,
2642 KMP_EVENT_ALLOW_COMPLETION = 1
2646 kmp_event_type_t type;
2647 kmp_tas_lock_t lock;
2655#define INIT_MAPSIZE 50
2657typedef struct kmp_taskgraph_flags {
2658 unsigned nowait : 1;
2659 unsigned re_record : 1;
2660 unsigned reserved : 30;
2661} kmp_taskgraph_flags_t;
2664typedef struct kmp_node_info {
2666 kmp_int32 *successors;
2667 kmp_int32 nsuccessors;
2668 std::atomic<kmp_int32>
2669 npredecessors_counter;
2670 kmp_int32 npredecessors;
2671 kmp_int32 successors_size;
2672 kmp_taskdata_t *parent_task;
2676typedef enum kmp_tdg_status {
2678 KMP_TDG_RECORDING = 1,
2683typedef struct kmp_tdg_info {
2685 kmp_taskgraph_flags_t tdg_flags;
2687 kmp_int32 num_roots;
2688 kmp_int32 *root_tasks;
2689 kmp_node_info_t *record_map;
2690 kmp_tdg_status_t tdg_status =
2692 std::atomic<kmp_int32> num_tasks;
2693 kmp_bootstrap_lock_t
2696 void *rec_taskred_data;
2698 kmp_int32 rec_num_taskred;
2701extern int __kmp_tdg_dot;
2702extern kmp_int32 __kmp_max_tdgs;
2703extern kmp_tdg_info_t **__kmp_global_tdgs;
2704extern kmp_int32 __kmp_curr_tdg_idx;
2705extern kmp_int32 __kmp_successors_size;
2706extern std::atomic<kmp_int32> __kmp_tdg_task_id;
2707extern kmp_int32 __kmp_num_tdg;
2710#ifdef BUILD_TIED_TASK_STACK
2713typedef struct kmp_stack_block {
2714 kmp_taskdata_t *sb_block[TASK_STACK_BLOCK_SIZE];
2715 struct kmp_stack_block *sb_next;
2716 struct kmp_stack_block *sb_prev;
2719typedef struct kmp_task_stack {
2720 kmp_stack_block_t ts_first_block;
2721 kmp_taskdata_t **ts_top;
2722 kmp_int32 ts_entries;
2727typedef struct kmp_tasking_flags {
2728#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
2731 unsigned reserved31 : 4;
2734 unsigned reserved31 : 5;
2736 unsigned hidden_helper : 1;
2737 unsigned target : 1;
2738 unsigned native : 1;
2740 unsigned complete : 1;
2741 unsigned executing : 1;
2742 unsigned started : 1;
2743 unsigned team_serial : 1;
2744 unsigned tasking_ser : 1;
2745 unsigned task_serial : 1;
2746 unsigned tasktype : 1;
2747 unsigned reserved : 8;
2748 unsigned free_agent_eligible : 1;
2749 unsigned detachable : 1;
2750 unsigned priority_specified : 1;
2752 unsigned destructors_thunk : 1;
2753 unsigned merged_if0 : 1;
2755 unsigned tiedness : 1;
2758 unsigned tiedness : 1;
2760 unsigned merged_if0 : 1;
2762 unsigned destructors_thunk : 1;
2766 unsigned priority_specified : 1;
2768 unsigned detachable : 1;
2769 unsigned free_agent_eligible : 1;
2771 unsigned reserved : 8;
2774 unsigned tasktype : 1;
2775 unsigned task_serial : 1;
2776 unsigned tasking_ser : 1;
2778 unsigned team_serial : 1;
2782 unsigned started : 1;
2783 unsigned executing : 1;
2784 unsigned complete : 1;
2786 unsigned native : 1;
2787 unsigned target : 1;
2788 unsigned hidden_helper : 1;
2791 unsigned reserved31 : 4;
2793 unsigned reserved31 : 5;
2796} kmp_tasking_flags_t;
2798typedef struct kmp_target_data {
2802struct kmp_taskdata {
2803 kmp_int32 td_task_id;
2804 kmp_tasking_flags_t td_flags;
2805 kmp_team_t *td_team;
2806 kmp_info_p *td_alloc_thread;
2808 kmp_taskdata_t *td_parent;
2810 std::atomic<kmp_int32> td_untied_count;
2814 kmp_uint32 td_taskwait_counter;
2815 kmp_int32 td_taskwait_thread;
2816 KMP_ALIGN_CACHE kmp_internal_control_t
2818 KMP_ALIGN_CACHE std::atomic<kmp_int32>
2819 td_allocated_child_tasks;
2821 std::atomic<kmp_int32>
2822 td_incomplete_child_tasks;
2829 kmp_task_team_t *td_task_team;
2830 size_t td_size_alloc;
2831#if defined(KMP_GOMP_COMPAT)
2833 kmp_int32 td_size_loop_bounds;
2835 kmp_taskdata_t *td_last_tied;
2836#if defined(KMP_GOMP_COMPAT)
2838 void (*td_copy_func)(
void *,
void *);
2840 kmp_event_t td_allow_completion_event;
2842 ompt_task_info_t ompt_task_info;
2845 bool is_taskgraph = 0;
2846 kmp_tdg_info_t *tdg;
2847 kmp_int32 td_tdg_task_id;
2849 kmp_target_data_t td_target_data;
2853KMP_BUILD_ASSERT(
sizeof(kmp_taskdata_t) %
sizeof(
void *) == 0);
2856typedef struct kmp_base_thread_data {
2860 kmp_bootstrap_lock_t td_deque_lock;
2863 kmp_int32 td_deque_size;
2864 kmp_uint32 td_deque_head;
2865 kmp_uint32 td_deque_tail;
2866 kmp_int32 td_deque_ntasks;
2868 kmp_int32 td_deque_last_stolen;
2869#ifdef BUILD_TIED_TASK_STACK
2870 kmp_task_stack_t td_susp_tied_tasks;
2873} kmp_base_thread_data_t;
2875#define TASK_DEQUE_BITS 8
2876#define INITIAL_TASK_DEQUE_SIZE (1 << TASK_DEQUE_BITS)
2878#define TASK_DEQUE_SIZE(td) ((td).td_deque_size)
2879#define TASK_DEQUE_MASK(td) ((td).td_deque_size - 1)
2881typedef union KMP_ALIGN_CACHE kmp_thread_data {
2882 kmp_base_thread_data_t td;
2884 char td_pad[KMP_PAD(kmp_base_thread_data_t, CACHE_LINE)];
2887typedef struct kmp_task_pri {
2888 kmp_thread_data_t td;
2894typedef struct kmp_base_task_team {
2895 kmp_bootstrap_lock_t
2900 kmp_bootstrap_lock_t tt_task_pri_lock;
2901 kmp_task_pri_t *tt_task_pri_list;
2903 kmp_task_team_t *tt_next;
2907 kmp_int32 tt_found_tasks;
2911 kmp_int32 tt_max_threads;
2912 kmp_int32 tt_found_proxy_tasks;
2913 kmp_int32 tt_untied_task_encountered;
2914 std::atomic<kmp_int32> tt_num_task_pri;
2917 kmp_int32 tt_hidden_helper_task_encountered;
2920 std::atomic<kmp_int32> tt_unfinished_threads;
2925} kmp_base_task_team_t;
2927union KMP_ALIGN_CACHE kmp_task_team {
2928 kmp_base_task_team_t tt;
2930 char tt_pad[KMP_PAD(kmp_base_task_team_t, CACHE_LINE)];
2933typedef struct kmp_task_team_list_t {
2934 kmp_task_team_t *task_team;
2935 kmp_task_team_list_t *next;
2936} kmp_task_team_list_t;
2938#if (USE_FAST_MEMORY == 3) || (USE_FAST_MEMORY == 5)
2941typedef struct kmp_free_list {
2942 void *th_free_list_self;
2943 void *th_free_list_sync;
2945 void *th_free_list_other;
2949#if KMP_NESTED_HOT_TEAMS
2952typedef struct kmp_hot_team_ptr {
2953 kmp_team_p *hot_team;
2954 kmp_int32 hot_team_nth;
2955} kmp_hot_team_ptr_t;
2957typedef struct kmp_teams_size {
2973typedef struct kmp_cg_root {
2974 kmp_info_p *cg_root;
2977 kmp_int32 cg_thread_limit;
2978 kmp_int32 cg_nthreads;
2979 struct kmp_cg_root *up;
2984typedef struct KMP_ALIGN_CACHE kmp_base_info {
2990 kmp_team_p *th_team;
2991 kmp_root_p *th_root;
2992 kmp_info_p *th_next_pool;
2993 kmp_disp_t *th_dispatch;
2999 kmp_info_p *th_team_master;
3000 int th_team_serialized;
3001 microtask_t th_teams_microtask;
3010 int th_team_bt_intervals;
3013 kmp_uint64 th_team_bt_intervals;
3016#if KMP_AFFINITY_SUPPORTED
3017 kmp_affin_mask_t *th_affin_mask;
3018 kmp_affinity_ids_t th_topology_ids;
3019 kmp_affinity_attrs_t th_topology_attrs;
3021 omp_allocator_handle_t th_def_allocator;
3025 int *th_set_nested_nth;
3029 const char *th_nt_msg;
3030 int th_set_nested_nth_sz;
3031#if KMP_NESTED_HOT_TEAMS
3032 kmp_hot_team_ptr_t *th_hot_teams;
3038#if KMP_AFFINITY_SUPPORTED
3039 int th_current_place;
3045 int th_prev_num_threads;
3047 kmp_uint64 th_bar_arrive_time;
3048 kmp_uint64 th_bar_min_time;
3049 kmp_uint64 th_frame_time;
3051 kmp_local_t th_local;
3052 struct private_common *th_pri_head;
3057 KMP_ALIGN_CACHE kmp_team_p
3061 ompt_thread_info_t ompt_thread_info;
3065 struct common_table *th_pri_common;
3067 volatile kmp_uint32 th_spin_here;
3070 volatile void *th_sleep_loc;
3071 flag_type th_sleep_loc_type;
3078 kmp_task_team_t *th_task_team;
3079 kmp_taskdata_t *th_current_task;
3080 kmp_uint8 th_task_state;
3081 kmp_uint32 th_reap_state;
3086 kmp_uint8 th_active_in_pool;
3088 std::atomic<kmp_uint32> th_used_in_team;
3091 struct cons_header *th_cons;
3092#if KMP_USE_HIER_SCHED
3094 kmp_hier_private_bdata_t *th_hier_bar_data;
3098 KMP_ALIGN_CACHE kmp_balign_t th_bar[bs_last_barrier];
3100 KMP_ALIGN_CACHE
volatile kmp_int32
3103#if (USE_FAST_MEMORY == 3) || (USE_FAST_MEMORY == 5)
3105 kmp_free_list_t th_free_lists[NUM_LISTS];
3110 kmp_win32_cond_t th_suspend_cv;
3111 kmp_win32_mutex_t th_suspend_mx;
3112 std::atomic<int> th_suspend_init;
3115 kmp_cond_align_t th_suspend_cv;
3116 kmp_mutex_align_t th_suspend_mx;
3117 std::atomic<int> th_suspend_init_count;
3121 kmp_itt_mark_t th_itt_mark_single;
3124#if KMP_STATS_ENABLED
3125 kmp_stats_list *th_stats;
3128 std::atomic<bool> th_blocking;
3130 kmp_cg_root_t *th_cg_roots;
3133typedef union KMP_ALIGN_CACHE kmp_info {
3135 char th_pad[KMP_PAD(kmp_base_info_t, CACHE_LINE)];
3141typedef struct kmp_base_data {
3142 volatile kmp_uint32 t_value;
3145typedef union KMP_ALIGN_CACHE kmp_sleep_team {
3147 char dt_pad[KMP_PAD(kmp_base_data_t, CACHE_LINE)];
3151typedef union KMP_ALIGN_CACHE kmp_ordered_team {
3153 char dt_pad[KMP_PAD(kmp_base_data_t, CACHE_LINE)];
3155} kmp_ordered_team_t;
3157typedef int (*launch_t)(
int gtid);
3160#define KMP_MIN_MALLOC_ARGV_ENTRIES 100
3166#if KMP_ARCH_X86 || KMP_ARCH_X86_64
3167#define KMP_INLINE_ARGV_BYTES \
3169 ((3 * KMP_PTR_SKIP + 2 * sizeof(int) + 2 * sizeof(kmp_int8) + \
3170 sizeof(kmp_int16) + sizeof(kmp_uint32)) % \
3173#define KMP_INLINE_ARGV_BYTES \
3174 (2 * CACHE_LINE - ((3 * KMP_PTR_SKIP + 2 * sizeof(int)) % CACHE_LINE))
3176#define KMP_INLINE_ARGV_ENTRIES (int)(KMP_INLINE_ARGV_BYTES / KMP_PTR_SKIP)
3178typedef struct KMP_ALIGN_CACHE kmp_base_team {
3181 KMP_ALIGN_CACHE kmp_ordered_team_t t_ordered;
3182 kmp_balign_team_t t_bar[bs_last_barrier];
3183 std::atomic<int> t_construct;
3184 char pad[
sizeof(kmp_lock_t)];
3187 std::atomic<void *> t_tg_reduce_data[2];
3188 std::atomic<int> t_tg_fini_counter[2];
3192 KMP_ALIGN_CACHE
int t_master_tid;
3193 int t_master_this_cons;
3197 kmp_team_p *t_parent;
3198 kmp_team_p *t_next_pool;
3199 kmp_disp_t *t_dispatch;
3200 kmp_task_team_t *t_task_team[2];
3201 kmp_proc_bind_t t_proc_bind;
3202 int t_primary_task_state;
3204 kmp_uint64 t_region_time;
3209 KMP_ALIGN_CACHE
void **t_argv;
3216 ompt_team_info_t ompt_team_info;
3217 ompt_lw_taskteam_t *ompt_serialized_team_info;
3220#if KMP_ARCH_X86 || KMP_ARCH_X86_64
3221 kmp_int8 t_fp_control_saved;
3223 kmp_int16 t_x87_fpu_control_word;
3227 void *t_inline_argv[KMP_INLINE_ARGV_ENTRIES];
3229 KMP_ALIGN_CACHE kmp_info_t **t_threads;
3231 *t_implicit_task_taskdata;
3234 KMP_ALIGN_CACHE
int t_max_argc;
3237 dispatch_shared_info_t *t_disp_buffer;
3240 kmp_r_sched_t t_sched;
3241#if KMP_AFFINITY_SUPPORTED
3245 int t_display_affinity;
3248 omp_allocator_handle_t t_def_allocator;
3251#if (KMP_ARCH_X86 || KMP_ARCH_X86_64)
3256 char dummy_padding[1024];
3259 KMP_ALIGN_CACHE kmp_internal_control_t *t_control_stack_top;
3262 std::atomic<kmp_int32> t_cancel_request;
3263 int t_master_active;
3264 void *t_copypriv_data;
3266 std::atomic<kmp_uint32> t_copyin_counter;
3271 distributedBarrier *b;
3272 kmp_nested_nthreads_t *t_nested_nth;
} kmp_base_team_t;

KMP_BUILD_ASSERT(sizeof(kmp_task_team_t *[2]) == sizeof(kmp_task_team_list_t));
KMP_BUILD_ASSERT(alignof(kmp_task_team_t *[2]) == alignof(kmp_task_team_list_t));
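// A minimal sketch of what the two KMP_BUILD_ASSERTs above enforce, restated
// with plain C++11 static_assert (illustrative only, not part of this header):
#if 0
static_assert(sizeof(kmp_task_team_t *[2]) == sizeof(kmp_task_team_list_t),
              "t_task_team[2] must be reinterpretable as a task-team list node");
static_assert(alignof(kmp_task_team_t *[2]) == alignof(kmp_task_team_list_t),
              "and the alignments must match for that cast to be valid");
#endif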
union KMP_ALIGN_CACHE kmp_team {
  kmp_base_team_t t;
  double t_align; /* use worst case alignment */
  char t_pad[KMP_PAD(kmp_base_team_t, CACHE_LINE)];
};
typedef union KMP_ALIGN_CACHE kmp_time_global {
  double dt_align; /* use worst case alignment */
  char dt_pad[KMP_PAD(kmp_base_data_t, CACHE_LINE)];
  kmp_base_data_t dt;
} kmp_time_global_t;
3293typedef struct kmp_base_global {
3295 kmp_time_global_t g_time;
3298 volatile int g_abort;
3299 volatile int g_done;
3302 enum dynamic_mode g_dynamic_mode;
} kmp_base_global_t;

typedef union KMP_ALIGN_CACHE kmp_global {
  kmp_base_global_t g;
  double g_align; /* use worst case alignment */
  char g_pad[KMP_PAD(kmp_base_global_t, CACHE_LINE)];
} kmp_global_t;
3311typedef struct kmp_base_root {
3316 volatile int r_active;
3318 std::atomic<int> r_in_parallel;
3320 kmp_team_t *r_root_team;
3321 kmp_team_t *r_hot_team;
3322 kmp_info_t *r_uber_thread;
3323 kmp_lock_t r_begin_lock;
3324 volatile int r_begin;
3326#if KMP_AFFINITY_SUPPORTED
3327 int r_affinity_assigned;
} kmp_base_root_t;

typedef union KMP_ALIGN_CACHE kmp_root {
  kmp_base_root_t r;
  double r_align; /* use worst case alignment */
  char r_pad[KMP_PAD(kmp_base_root_t, CACHE_LINE)];
} kmp_root_t;
struct fortran_inx_info {
  kmp_int32 data;
};
3345typedef struct kmp_old_threads_list_t {
3346 kmp_info_t **threads;
3347 struct kmp_old_threads_list_t *next;
3348} kmp_old_threads_list_t;
3352extern int __kmp_settings;
3353extern int __kmp_duplicate_library_ok;
3355extern int __kmp_forkjoin_frames;
3356extern int __kmp_forkjoin_frames_mode;
3358extern PACKED_REDUCTION_METHOD_T __kmp_force_reduction_method;
3359extern int __kmp_determ_red;
3362extern int kmp_a_debug;
3363extern int kmp_b_debug;
3364extern int kmp_c_debug;
3365extern int kmp_d_debug;
3366extern int kmp_e_debug;
3367extern int kmp_f_debug;
3371#define KMP_DEBUG_BUF_LINES_INIT 512
3372#define KMP_DEBUG_BUF_LINES_MIN 1
3374#define KMP_DEBUG_BUF_CHARS_INIT 128
3375#define KMP_DEBUG_BUF_CHARS_MIN 2
3379extern int __kmp_debug_buf_lines;
extern int __kmp_debug_buf_chars;
3382extern int __kmp_debug_buf_atomic;
3385extern char *__kmp_debug_buffer;
3386extern std::atomic<int> __kmp_debug_count;
3388extern int __kmp_debug_buf_warn_chars;
3393extern int __kmp_par_range;
3395#define KMP_PAR_RANGE_ROUTINE_LEN 1024
3396extern char __kmp_par_range_routine[KMP_PAR_RANGE_ROUTINE_LEN];
3397#define KMP_PAR_RANGE_FILENAME_LEN 1024
3398extern char __kmp_par_range_filename[KMP_PAR_RANGE_FILENAME_LEN];
3399extern int __kmp_par_range_lb;
3400extern int __kmp_par_range_ub;
3406extern int __kmp_storage_map_verbose;
3408extern int __kmp_storage_map_verbose_specified;
3410#if KMP_ARCH_X86 || KMP_ARCH_X86_64
3411extern kmp_cpuinfo_t __kmp_cpuinfo;
static inline bool __kmp_is_hybrid_cpu() { return __kmp_cpuinfo.flags.hybrid; }
#elif KMP_OS_DARWIN && KMP_ARCH_AARCH64
// Apple silicon is assumed hybrid (performance + efficiency cores).
static inline bool __kmp_is_hybrid_cpu() { return true; }
#else
static inline bool __kmp_is_hybrid_cpu() { return false; }
#endif
3419extern volatile int __kmp_init_serial;
3420extern volatile int __kmp_init_gtid;
3421extern volatile int __kmp_init_common;
3422extern volatile int __kmp_need_register_serial;
3423extern volatile int __kmp_init_middle;
3424extern volatile int __kmp_init_parallel;
3426extern volatile int __kmp_init_monitor;
3428extern volatile int __kmp_init_user_locks;
3429extern volatile int __kmp_init_hidden_helper_threads;
3430extern int __kmp_init_counter;
3431extern int __kmp_root_counter;
3432extern int __kmp_version;
3435extern kmp_cached_addr_t *__kmp_threadpriv_cache_list;
3438extern kmp_uint32 __kmp_barrier_gather_bb_dflt;
3439extern kmp_uint32 __kmp_barrier_release_bb_dflt;
3440extern kmp_bar_pat_e __kmp_barrier_gather_pat_dflt;
3441extern kmp_bar_pat_e __kmp_barrier_release_pat_dflt;
3442extern kmp_uint32 __kmp_barrier_gather_branch_bits[bs_last_barrier];
3443extern kmp_uint32 __kmp_barrier_release_branch_bits[bs_last_barrier];
3444extern kmp_bar_pat_e __kmp_barrier_gather_pattern[bs_last_barrier];
3445extern kmp_bar_pat_e __kmp_barrier_release_pattern[bs_last_barrier];
3446extern char const *__kmp_barrier_branch_bit_env_name[bs_last_barrier];
3447extern char const *__kmp_barrier_pattern_env_name[bs_last_barrier];
3448extern char const *__kmp_barrier_type_name[bs_last_barrier];
3449extern char const *__kmp_barrier_pattern_name[bp_last_bar];
3452extern kmp_bootstrap_lock_t __kmp_initz_lock;
3453extern kmp_bootstrap_lock_t __kmp_forkjoin_lock;
3454extern kmp_bootstrap_lock_t __kmp_task_team_lock;
extern kmp_bootstrap_lock_t
    __kmp_exit_lock; /* exit() is not always thread-safe */
#if KMP_USE_MONITOR
extern kmp_bootstrap_lock_t
    __kmp_monitor_lock; /* control monitor thread creation */
#endif
extern kmp_bootstrap_lock_t
    __kmp_tp_cached_lock; /* used to allow the threadprivate cache and
                             __kmp_threads expansion to co-exist */
3465extern kmp_lock_t __kmp_global_lock;
3467extern enum library_type __kmp_library;
3473extern int __kmp_chunk;
3474extern int __kmp_force_monotonic;
3476extern size_t __kmp_stksize;
3478extern size_t __kmp_monitor_stksize;
3480extern size_t __kmp_stkoffset;
3481extern int __kmp_stkpadding;
extern size_t __kmp_malloc_pool_incr;
3485extern int __kmp_env_stksize;
3486extern int __kmp_env_blocktime;
3487extern int __kmp_env_checks;
3488extern int __kmp_env_consistency_check;
3489extern int __kmp_generate_warnings;
3490extern int __kmp_reserve_warn;
3493extern int __kmp_suspend_count;
3496extern kmp_int32 __kmp_use_yield;
3497extern kmp_int32 __kmp_use_yield_exp_set;
3498extern kmp_uint32 __kmp_yield_init;
3499extern kmp_uint32 __kmp_yield_next;
3500extern kmp_uint64 __kmp_pause_init;
3503extern int __kmp_allThreadsSpecified;
3505extern size_t __kmp_align_alloc;
3507extern int __kmp_xproc;
3508extern int __kmp_avail_proc;
3509extern size_t __kmp_sys_min_stksize;
3510extern int __kmp_sys_max_nth;
3512extern int __kmp_max_nth;
3514extern int __kmp_cg_max_nth;
3515extern int __kmp_task_max_nth;
3516extern int __kmp_teams_max_nth;
3517extern int __kmp_threads_capacity;
3519extern int __kmp_dflt_team_nth;
3521extern int __kmp_dflt_team_nth_ub;
3523extern int __kmp_tp_capacity;
3525extern int __kmp_tp_cached;
3527extern int __kmp_dflt_blocktime;
3529extern char __kmp_blocktime_units;
3530extern bool __kmp_wpolicy_passive;
// Convert raw blocktime from ms to us if needed.
static inline void __kmp_aux_convert_blocktime(int *bt) {
  if (__kmp_blocktime_units == 'm') {
    if (*bt > INT_MAX / 1000) { // units are in ms; check for overflow
      *bt = INT_MAX / 1000;
      KMP_INFORM(MaxValueUsing, "kmp_set_blocktime(ms)", bt);
    }
    *bt = *bt * 1000; // convert ms to us
  }
}
extern kmp_uint32 __kmp_monitor_wakeups;
extern int __kmp_bt_intervals;
3549#ifdef KMP_ADJUST_BLOCKTIME
3550extern int __kmp_zero_bt;
3552#ifdef KMP_DFLT_NTH_CORES
3553extern int __kmp_ncores;
3556extern int __kmp_abort_delay;
3558extern int __kmp_need_register_atfork_specified;
3559extern int __kmp_need_register_atfork;
3561extern int __kmp_gtid_mode;
extern int __kmp_adjust_gtid_mode;
3570#ifdef KMP_TDATA_GTID
extern KMP_THREAD_LOCAL int __kmp_gtid;
3573extern int __kmp_tls_gtid_min;
3574extern int __kmp_foreign_tp;
3575#if KMP_ARCH_X86 || KMP_ARCH_X86_64
3576extern int __kmp_inherit_fp_control;
3577extern kmp_int16 __kmp_init_x87_fpu_control_word;
3578extern kmp_uint32 __kmp_init_mxcsr;
3583extern int __kmp_dflt_max_active_levels;
3586extern bool __kmp_dflt_max_active_levels_set;
3587extern int __kmp_dispatch_num_buffers;
3589#if KMP_NESTED_HOT_TEAMS
3590extern int __kmp_hot_teams_mode;
3591extern int __kmp_hot_teams_max_level;
3594#if KMP_MIC_SUPPORTED
3595extern enum mic_type __kmp_mic_type;
3598#ifdef USE_LOAD_BALANCE
3599extern double __kmp_load_balance_interval;
3602#if KMP_USE_ADAPTIVE_LOCKS
3605struct kmp_adaptive_backoff_params_t {
3607 kmp_uint32 max_soft_retries;
3610 kmp_uint32 max_badness;
3613extern kmp_adaptive_backoff_params_t __kmp_adaptive_backoff_params;
3615#if KMP_DEBUG_ADAPTIVE_LOCKS
3616extern const char *__kmp_speculative_statsfile;
3621extern int __kmp_display_env;
3622extern int __kmp_display_env_verbose;
3623extern int __kmp_omp_cancellation;
3624extern int __kmp_nteams;
3625extern int __kmp_teams_thread_limit;
3631extern kmp_info_t **__kmp_threads;
3633extern kmp_old_threads_list_t *__kmp_old_threads_list;
3635extern volatile kmp_team_t *__kmp_team_pool;
3636extern volatile kmp_info_t *__kmp_thread_pool;
3637extern kmp_info_t *__kmp_thread_pool_insert_pt;
3640extern volatile int __kmp_nth;
3643extern volatile int __kmp_all_nth;
3644extern std::atomic<int> __kmp_thread_pool_active_nth;
3646extern kmp_root_t **__kmp_root;
3650#define __kmp_get_gtid() __kmp_get_global_thread_id()
3651#define __kmp_entry_gtid() __kmp_get_global_thread_id_reg()
3652#define __kmp_get_tid() (__kmp_tid_from_gtid(__kmp_get_gtid()))
3653#define __kmp_get_team() (__kmp_threads[(__kmp_get_gtid())]->th.th_team)
3654#define __kmp_get_thread() (__kmp_thread_from_gtid(__kmp_get_gtid()))
3659#define __kmp_get_team_num_threads(gtid) \
3660 (__kmp_threads[(gtid)]->th.th_team->t.t_nproc)
static inline bool KMP_UBER_GTID(int gtid) {
  KMP_DEBUG_ASSERT(gtid >= KMP_GTID_MIN);
  KMP_DEBUG_ASSERT(gtid < __kmp_threads_capacity);
  return (gtid >= 0 && __kmp_root[gtid] && __kmp_threads[gtid] &&
          __kmp_threads[gtid] == __kmp_root[gtid]->r.r_uber_thread);
}

static inline int __kmp_tid_from_gtid(int gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);
  return __kmp_threads[gtid]->th.th_info.ds.ds_tid;
}

static inline int __kmp_gtid_from_tid(int tid, const kmp_team_t *team) {
  KMP_DEBUG_ASSERT(tid >= 0 && team);
  return team->t.t_threads[tid]->th.th_info.ds.ds_gtid;
}

static inline int __kmp_gtid_from_thread(const kmp_info_t *thr) {
  KMP_DEBUG_ASSERT(thr);
  return thr->th.th_info.ds.ds_gtid;
}

static inline kmp_info_t *__kmp_thread_from_gtid(int gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);
  return __kmp_threads[gtid];
}

static inline kmp_team_t *__kmp_team_from_gtid(int gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);
  return __kmp_threads[gtid]->th.th_team;
}

static inline void __kmp_assert_valid_gtid(kmp_int32 gtid) {
  if (UNLIKELY(gtid < 0 || gtid >= __kmp_threads_capacity))
    KMP_FATAL(ThreadIdentInvalid);
}
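// Usage sketch (illustrative, assumes a registered OpenMP thread): a gtid
// indexes the global __kmp_threads array, while a tid is the rank within one
// team; the helpers above convert between the two numbering schemes.
#if 0
int gtid = __kmp_get_gtid();
__kmp_assert_valid_gtid(gtid);
kmp_team_t *team = __kmp_team_from_gtid(gtid);
int tid = __kmp_tid_from_gtid(gtid); // rank within team
KMP_DEBUG_ASSERT(__kmp_gtid_from_tid(tid, team) == gtid); // round-trips
#endif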
3699#if KMP_HAVE_MWAIT || KMP_HAVE_UMWAIT
3700extern int __kmp_user_level_mwait;
3701extern int __kmp_umwait_enabled;
3702extern int __kmp_mwait_enabled;
3703extern int __kmp_mwait_hints;
3707extern int __kmp_waitpkg_enabled;
3708extern int __kmp_tpause_state;
3709extern int __kmp_tpause_hint;
3710extern int __kmp_tpause_enabled;
3715extern kmp_global_t __kmp_global;
3717extern kmp_info_t __kmp_monitor;
3719extern std::atomic<kmp_int32> __kmp_team_counter;
3721extern std::atomic<kmp_int32> __kmp_task_counter;
3724#define _KMP_GEN_ID(counter) \
3725 (__kmp_debugging ? KMP_ATOMIC_INC(&counter) + 1 : ~0)
3727#define _KMP_GEN_ID(counter) (~0)
3730#define KMP_GEN_TASK_ID() _KMP_GEN_ID(__kmp_task_counter)
3731#define KMP_GEN_TEAM_ID() _KMP_GEN_ID(__kmp_team_counter)
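// Behavior sketch: team/task IDs are only generated when a debugger is
// attached (__kmp_debugging); otherwise _KMP_GEN_ID expands to ~0, so the
// common case pays no atomic-increment traffic.
#if 0
kmp_int32 team_id = KMP_GEN_TEAM_ID(); // ~0 unless __kmp_debugging is set
#endif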
extern void __kmp_print_storage_map_gtid(int gtid, void *p1, void *p2,
                                         size_t size, char const *format, ...);

extern void __kmp_serial_initialize(void);
extern void __kmp_middle_initialize(void);
extern void __kmp_parallel_initialize(void);

extern void __kmp_internal_begin(void);
extern void __kmp_internal_end_library(int gtid);
extern void __kmp_internal_end_thread(int gtid);
extern void __kmp_internal_end_atexit(void);
extern void __kmp_internal_end_dtor(void);
extern void __kmp_internal_end_dest(void *);
extern int __kmp_register_root(int initial_thread);
extern void __kmp_unregister_root(int gtid);
extern void __kmp_unregister_library(void);

extern int __kmp_ignore_mppbeg(void);
extern int __kmp_ignore_mppend(void);

extern int __kmp_enter_single(int gtid, ident_t *id_ref, int push_ws);
extern void __kmp_exit_single(int gtid);

extern void __kmp_parallel_deo(int *gtid_ref, int *cid_ref, ident_t *loc_ref);
extern void __kmp_parallel_dxo(int *gtid_ref, int *cid_ref, ident_t *loc_ref);
#ifdef USE_LOAD_BALANCE
extern int __kmp_get_load_balance(int);
#endif

extern int __kmp_get_global_thread_id(void);
extern int __kmp_get_global_thread_id_reg(void);
extern void __kmp_exit_thread(int exit_status);
extern void __kmp_abort(char const *format, ...);
extern void __kmp_abort_thread(void);
KMP_NORETURN extern void __kmp_abort_process(void);
extern void __kmp_warn(char const *format, ...);

extern void __kmp_set_num_threads(int new_nth, int gtid);

extern bool __kmp_detect_shm();
extern bool __kmp_detect_tmp();

// Returns current thread (pointer to kmp_info_t); current thread must be
// registered.
static inline kmp_info_t *__kmp_entry_thread() {
  int gtid = __kmp_entry_gtid();
  return __kmp_threads[gtid];
}
extern void __kmp_set_max_active_levels(int gtid, int new_max_active_levels);
extern int __kmp_get_max_active_levels(int gtid);
extern int __kmp_get_ancestor_thread_num(int gtid, int level);
extern int __kmp_get_team_size(int gtid, int level);
extern void __kmp_set_schedule(int gtid, kmp_sched_t new_sched, int chunk);
extern void __kmp_get_schedule(int gtid, kmp_sched_t *sched, int *chunk);

extern unsigned short __kmp_get_random(kmp_info_t *thread);
extern void __kmp_init_random(kmp_info_t *thread);

extern kmp_r_sched_t __kmp_get_schedule_global(void);
extern void __kmp_adjust_num_threads(int new_nproc);
extern void __kmp_check_stksize(size_t *val);
extern void *___kmp_allocate(size_t size KMP_SRC_LOC_DECL);
extern void *___kmp_page_allocate(size_t size KMP_SRC_LOC_DECL);
extern void ___kmp_free(void *ptr KMP_SRC_LOC_DECL);
#define __kmp_allocate(size) ___kmp_allocate((size)KMP_SRC_LOC_CURR)
#define __kmp_page_allocate(size) ___kmp_page_allocate((size)KMP_SRC_LOC_CURR)
#define __kmp_free(ptr) ___kmp_free((ptr)KMP_SRC_LOC_CURR)

extern void *___kmp_fast_allocate(kmp_info_t *this_thr,
                                  size_t size KMP_SRC_LOC_DECL);
extern void ___kmp_fast_free(kmp_info_t *this_thr, void *ptr KMP_SRC_LOC_DECL);
3812extern void __kmp_free_fast_memory(kmp_info_t *this_thr);
3813extern void __kmp_initialize_fast_memory(kmp_info_t *this_thr);
3814#define __kmp_fast_allocate(this_thr, size) \
3815 ___kmp_fast_allocate((this_thr), (size)KMP_SRC_LOC_CURR)
3816#define __kmp_fast_free(this_thr, ptr) \
3817 ___kmp_fast_free((this_thr), (ptr)KMP_SRC_LOC_CURR)
extern void *___kmp_thread_malloc(kmp_info_t *th, size_t size KMP_SRC_LOC_DECL);
extern void *___kmp_thread_calloc(kmp_info_t *th, size_t nelem,
                                  size_t elsize KMP_SRC_LOC_DECL);
extern void *___kmp_thread_realloc(kmp_info_t *th, void *ptr,
                                   size_t size KMP_SRC_LOC_DECL);
extern void ___kmp_thread_free(kmp_info_t *th, void *ptr KMP_SRC_LOC_DECL);
#define __kmp_thread_malloc(th, size)                                          \
  ___kmp_thread_malloc((th), (size)KMP_SRC_LOC_CURR)
#define __kmp_thread_calloc(th, nelem, elsize)                                 \
  ___kmp_thread_calloc((th), (nelem), (elsize)KMP_SRC_LOC_CURR)
#define __kmp_thread_realloc(th, ptr, size)                                    \
  ___kmp_thread_realloc((th), (ptr), (size)KMP_SRC_LOC_CURR)
#define __kmp_thread_free(th, ptr)                                             \
  ___kmp_thread_free((th), (ptr)KMP_SRC_LOC_CURR)
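// Usage sketch (illustrative): the __kmp_thread_* wrappers charge an
// allocation to a particular thread and propagate the caller's file/line
// through KMP_SRC_LOC_CURR in builds where source-location tracking is on.
#if 0
kmp_info_t *thr = __kmp_entry_thread();
int *buf = (int *)__kmp_thread_malloc(thr, 16 * sizeof(int));
buf = (int *)__kmp_thread_realloc(thr, buf, 32 * sizeof(int));
__kmp_thread_free(thr, buf);
#endif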
extern void __kmp_push_num_threads(ident_t *loc, int gtid, int num_threads);
extern void __kmp_push_num_threads_list(ident_t *loc, int gtid,
                                        kmp_uint32 list_length,
                                        int *num_threads_list);
extern void __kmp_set_strict_num_threads(ident_t *loc, int gtid, int sev,
                                         const char *msg);

extern void __kmp_push_proc_bind(ident_t *loc, int gtid,
                                 kmp_proc_bind_t proc_bind);
extern void __kmp_push_num_teams(ident_t *loc, int gtid, int num_teams,
                                 int num_threads);
extern void __kmp_push_num_teams_51(ident_t *loc, int gtid, int num_teams_lb,
                                    int num_teams_ub, int num_threads);
extern void __kmp_yield();

extern void __kmpc_dispatch_init_4(ident_t *loc, kmp_int32 gtid,
                                   enum sched_type schedule, kmp_int32 lb,
                                   kmp_int32 ub, kmp_int32 st, kmp_int32 chunk);
extern void __kmpc_dispatch_init_4u(ident_t *loc, kmp_int32 gtid,
                                    enum sched_type schedule, kmp_uint32 lb,
                                    kmp_uint32 ub, kmp_int32 st,
                                    kmp_int32 chunk);
extern void __kmpc_dispatch_init_8(ident_t *loc, kmp_int32 gtid,
                                   enum sched_type schedule, kmp_int64 lb,
                                   kmp_int64 ub, kmp_int64 st, kmp_int64 chunk);
extern void __kmpc_dispatch_init_8u(ident_t *loc, kmp_int32 gtid,
                                    enum sched_type schedule, kmp_uint64 lb,
                                    kmp_uint64 ub, kmp_int64 st,
                                    kmp_int64 chunk);

extern int __kmpc_dispatch_next_4(ident_t *loc, kmp_int32 gtid,
                                  kmp_int32 *p_last, kmp_int32 *p_lb,
                                  kmp_int32 *p_ub, kmp_int32 *p_st);
extern int __kmpc_dispatch_next_4u(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 *p_last, kmp_uint32 *p_lb,
                                   kmp_uint32 *p_ub, kmp_int32 *p_st);
extern int __kmpc_dispatch_next_8(ident_t *loc, kmp_int32 gtid,
                                  kmp_int32 *p_last, kmp_int64 *p_lb,
                                  kmp_int64 *p_ub, kmp_int64 *p_st);
extern int __kmpc_dispatch_next_8u(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 *p_last, kmp_uint64 *p_lb,
                                   kmp_uint64 *p_ub, kmp_int64 *p_st);

extern void __kmpc_dispatch_fini_4(ident_t *loc, kmp_int32 gtid);
extern void __kmpc_dispatch_fini_8(ident_t *loc, kmp_int32 gtid);
extern void __kmpc_dispatch_fini_4u(ident_t *loc, kmp_int32 gtid);
extern void __kmpc_dispatch_fini_8u(ident_t *loc, kmp_int32 gtid);
extern void __kmpc_dispatch_deinit(ident_t *loc, kmp_int32 gtid);
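// Lowering sketch (illustrative): roughly how a compiler drives this API for
// "#pragma omp for schedule(dynamic, 4)" over i = 0..99 (body() is a
// hypothetical loop body; loc/gtid come from the enclosing codegen context):
#if 0
kmp_int32 lb, ub, st, last;
__kmpc_dispatch_init_4(loc, gtid, kmp_sch_dynamic_chunked, 0, 99, 1, 4);
while (__kmpc_dispatch_next_4(loc, gtid, &last, &lb, &ub, &st)) {
  for (kmp_int32 i = lb; i <= ub; i += st)
    body(i);
}
#endif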
#ifdef KMP_GOMP_COMPAT

extern void __kmp_aux_dispatch_init_4(ident_t *loc, kmp_int32 gtid,
                                      enum sched_type schedule, kmp_int32 lb,
                                      kmp_int32 ub, kmp_int32 st,
                                      kmp_int32 chunk, int push_ws);
extern void __kmp_aux_dispatch_init_4u(ident_t *loc, kmp_int32 gtid,
                                       enum sched_type schedule, kmp_uint32 lb,
                                       kmp_uint32 ub, kmp_int32 st,
                                       kmp_int32 chunk, int push_ws);
extern void __kmp_aux_dispatch_init_8(ident_t *loc, kmp_int32 gtid,
                                      enum sched_type schedule, kmp_int64 lb,
                                      kmp_int64 ub, kmp_int64 st,
                                      kmp_int64 chunk, int push_ws);
extern void __kmp_aux_dispatch_init_8u(ident_t *loc, kmp_int32 gtid,
                                       enum sched_type schedule, kmp_uint64 lb,
                                       kmp_uint64 ub, kmp_int64 st,
                                       kmp_int64 chunk, int push_ws);
extern void __kmp_aux_dispatch_fini_chunk_4(ident_t *loc, kmp_int32 gtid);
extern void __kmp_aux_dispatch_fini_chunk_8(ident_t *loc, kmp_int32 gtid);
extern void __kmp_aux_dispatch_fini_chunk_4u(ident_t *loc, kmp_int32 gtid);
extern void __kmp_aux_dispatch_fini_chunk_8u(ident_t *loc, kmp_int32 gtid);

#endif /* KMP_GOMP_COMPAT */
3911extern kmp_uint32 __kmp_eq_4(kmp_uint32 value, kmp_uint32 checker);
3912extern kmp_uint32 __kmp_neq_4(kmp_uint32 value, kmp_uint32 checker);
3913extern kmp_uint32 __kmp_lt_4(kmp_uint32 value, kmp_uint32 checker);
3914extern kmp_uint32 __kmp_ge_4(kmp_uint32 value, kmp_uint32 checker);
3915extern kmp_uint32 __kmp_le_4(kmp_uint32 value, kmp_uint32 checker);
extern kmp_uint32 __kmp_wait_4(kmp_uint32 volatile *spinner, kmp_uint32 checker,
                               kmp_uint32 (*pred)(kmp_uint32, kmp_uint32),
                               void *obj);
extern void __kmp_wait_4_ptr(void *spinner, kmp_uint32 checker,
                             kmp_uint32 (*pred)(void *, kmp_uint32), void *obj);

extern void __kmp_wait_64(kmp_info_t *this_thr, kmp_flag_64<> *flag,
                          int final_spin);
extern void __kmp_release_64(kmp_flag_64<> *flag);

extern void __kmp_infinite_loop(void);

extern void __kmp_cleanup(void);
#if KMP_HANDLE_SIGNALS
extern int __kmp_handle_signals;
extern void __kmp_install_signals(int parallel_init);
extern void __kmp_remove_signals(void);
#endif

extern void __kmp_clear_system_time(void);
extern void __kmp_read_system_time(double *delta);

extern void __kmp_check_stack_overlap(kmp_info_t *thr);

extern void __kmp_expand_host_name(char *buffer, size_t size);
extern void __kmp_expand_file_name(char *result, size_t rlen, char *pattern);
#if KMP_ARCH_X86 || KMP_ARCH_X86_64 ||                                         \
    (KMP_OS_WINDOWS && (KMP_ARCH_AARCH64 || KMP_ARCH_ARM))
extern void __kmp_initialize_system_tick(void); /* initialize timer tick value */
#endif

extern void __kmp_runtime_initialize(void); /* machine-specific initialization */
extern void __kmp_runtime_destroy(void);
#if KMP_AFFINITY_SUPPORTED
extern char *__kmp_affinity_print_mask(char *buf, int buf_len,
                                       kmp_affin_mask_t *mask);
extern kmp_str_buf_t *__kmp_affinity_str_buf_mask(kmp_str_buf_t *buf,
                                                  kmp_affin_mask_t *mask);
extern void __kmp_affinity_initialize(kmp_affinity_t &affinity);
extern void __kmp_affinity_uninitialize(void);
extern void __kmp_affinity_set_init_mask(
    int gtid, int isa_root); /* set affinity according to KMP_AFFINITY */
void __kmp_affinity_bind_init_mask(int gtid);
extern void __kmp_affinity_bind_place(int gtid);
extern void __kmp_affinity_determine_capable(const char *env_var);
extern int __kmp_aux_set_affinity(void **mask);
extern int __kmp_aux_get_affinity(void **mask);
extern int __kmp_aux_get_affinity_max_proc();
extern int __kmp_aux_set_affinity_mask_proc(int proc, void **mask);
extern int __kmp_aux_unset_affinity_mask_proc(int proc, void **mask);
extern int __kmp_aux_get_affinity_mask_proc(int proc, void **mask);
extern void __kmp_balanced_affinity(kmp_info_t *th, int team_size);
#if KMP_WEIGHTED_ITERATIONS_SUPPORTED
extern int __kmp_get_first_osid_with_ecore(void);
#endif
#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY ||     \
    KMP_OS_AIX
extern int kmp_set_thread_affinity_mask_initial(void);
#endif
static inline void __kmp_assign_root_init_mask() {
  int gtid = __kmp_entry_gtid();
  kmp_root_t *r = __kmp_threads[gtid]->th.th_root;
  if (r->r.r_uber_thread == __kmp_threads[gtid] && !r->r.r_affinity_assigned) {
    __kmp_affinity_set_init_mask(gtid, /*isa_root=*/TRUE);
    __kmp_affinity_bind_init_mask(gtid);
    r->r.r_affinity_assigned = TRUE;
  }
}
static inline void __kmp_reset_root_init_mask(int gtid) {
  if (!KMP_AFFINITY_CAPABLE())
    return;
  kmp_info_t *th = __kmp_threads[gtid];
  kmp_root_t *r = th->th.th_root;
  if (r->r.r_uber_thread == th && r->r.r_affinity_assigned) {
    __kmp_set_system_affinity(__kmp_affin_origMask, FALSE);
    KMP_CPU_COPY(th->th.th_affin_mask, __kmp_affin_origMask);
    r->r.r_affinity_assigned = FALSE;
  }
}
#else /* KMP_AFFINITY_SUPPORTED */
#define __kmp_assign_root_init_mask() /* Nothing */
static inline void __kmp_reset_root_init_mask(int gtid) {}
#endif /* KMP_AFFINITY_SUPPORTED */
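// Usage sketch (illustrative): the root's initial affinity mask is assigned
// lazily and at most once per root thread, and reset restores the original
// process mask kept in __kmp_affin_origMask; when KMP_AFFINITY_SUPPORTED is 0
// both calls collapse to no-ops.
#if 0
__kmp_assign_root_init_mask(); // idempotent; binds the uber thread
// ... parallel work ...
__kmp_reset_root_init_mask(__kmp_get_gtid()); // restore the original mask
#endif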
extern size_t __kmp_aux_capture_affinity(int gtid, const char *format,
                                         kmp_str_buf_t *buffer);
extern void __kmp_aux_display_affinity(int gtid, const char *format);

extern void __kmp_cleanup_hierarchy();
extern void __kmp_get_hierarchy(kmp_uint32 nproc, kmp_bstate_t *thr_bar);
extern int __kmp_futex_determine_capable(void);

extern void __kmp_gtid_set_specific(int gtid);
extern int __kmp_gtid_get_specific(void);

extern double __kmp_read_cpu_time(void);

extern int __kmp_read_system_info(struct kmp_sys_info *info);

extern void __kmp_create_monitor(kmp_info_t *th);

extern void *__kmp_launch_thread(kmp_info_t *thr);

extern void __kmp_create_worker(int gtid, kmp_info_t *th, size_t stack_size);

extern int __kmp_still_running(kmp_info_t *th);
extern int __kmp_is_thread_alive(kmp_info_t *th, DWORD *exit_val);
extern void __kmp_free_handle(kmp_thread_t tHandle);

extern void __kmp_reap_monitor(kmp_info_t *th);
extern void __kmp_reap_worker(kmp_info_t *th);
extern void __kmp_terminate_thread(int gtid);

extern int __kmp_try_suspend_mx(kmp_info_t *th);
extern void __kmp_lock_suspend_mx(kmp_info_t *th);
extern void __kmp_unlock_suspend_mx(kmp_info_t *th);

extern void __kmp_elapsed(double *);
extern void __kmp_elapsed_tick(double *);

extern void __kmp_enable(int old_state);
extern void __kmp_disable(int *old_state);

extern void __kmp_thread_sleep(int millis);

extern void __kmp_common_initialize(void);
extern void __kmp_common_destroy(void);
extern void __kmp_common_destroy_gtid(int gtid);

extern void __kmp_register_atfork(void);
extern void __kmp_suspend_initialize(void);
extern void __kmp_suspend_initialize_thread(kmp_info_t *th);
extern void __kmp_suspend_uninitialize_thread(kmp_info_t *th);

extern kmp_info_t *__kmp_allocate_thread(kmp_root_t *root, kmp_team_t *team,
                                         int tid);
extern kmp_team_t *
__kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
                    ompt_data_t ompt_parallel_data, kmp_proc_bind_t proc_bind,
                    kmp_internal_control_t *new_icvs,
                    int argc USE_NESTED_HOT_ARG(kmp_info_t *thr));
extern void __kmp_free_thread(kmp_info_t *);
extern void __kmp_free_team(kmp_root_t *,
                            kmp_team_t *USE_NESTED_HOT_ARG(kmp_info_t *));
extern kmp_team_t *__kmp_reap_team(kmp_team_t *);
4090extern void __kmp_initialize_bget(kmp_info_t *th);
4091extern void __kmp_finalize_bget(kmp_info_t *th);
KMP_EXPORT void *kmpc_malloc(size_t size);
KMP_EXPORT void *kmpc_aligned_malloc(size_t size, size_t alignment);
KMP_EXPORT void *kmpc_calloc(size_t nelem, size_t elsize);
KMP_EXPORT void *kmpc_realloc(void *ptr, size_t size);
KMP_EXPORT void kmpc_free(void *ptr);

extern int __kmp_barrier(enum barrier_type bt, int gtid, int is_split,
                         size_t reduce_size, void *reduce_data,
                         void (*reduce)(void *, void *));
extern void __kmp_end_split_barrier(enum barrier_type bt, int gtid);
extern int __kmp_barrier_gomp_cancel(int gtid);
enum fork_context_e {
  fork_context_gnu, /* called from GNU generated code, so must not invoke the
                       microtask internally */
  fork_context_intel, /* called from Intel generated code */
  fork_context_last
};
extern int __kmp_fork_call(ident_t *loc, int gtid,
                           enum fork_context_e fork_context, kmp_int32 argc,
                           microtask_t microtask, launch_t invoker,
                           kmp_va_list ap);

extern void __kmp_join_call(ident_t *loc, int gtid,
                            enum fork_context_e fork_context,
                            int exit_teams = 0);

extern void __kmp_serialized_parallel(ident_t *id, kmp_int32 gtid);
extern void __kmp_internal_fork(ident_t *id, int gtid, kmp_team_t *team);
extern void __kmp_internal_join(ident_t *id, int gtid, kmp_team_t *team);
extern int __kmp_invoke_task_func(int gtid);
extern void __kmp_run_before_invoked_task(int gtid, int tid,
                                          kmp_info_t *this_thr,
                                          kmp_team_t *team);
extern void __kmp_run_after_invoked_task(int gtid, int tid,
                                         kmp_info_t *this_thr,
                                         kmp_team_t *team);

KMP_EXPORT int __kmpc_invoke_task_func(int gtid);
extern int __kmp_invoke_teams_master(int gtid);
extern void __kmp_teams_master(int gtid);
extern int __kmp_aux_get_team_num();
extern int __kmp_aux_get_num_teams();
extern void __kmp_save_internal_controls(kmp_info_t *thread);
extern void __kmp_user_set_library(enum library_type arg);
extern void __kmp_aux_set_library(enum library_type arg);
extern void __kmp_aux_set_stacksize(size_t arg);
extern void __kmp_aux_set_blocktime(int arg, kmp_info_t *thread, int tid);
extern void __kmp_aux_set_defaults(char const *str, size_t len);

void kmpc_set_blocktime(int arg);
void ompc_set_nested(int flag);
void ompc_set_dynamic(int flag);
void ompc_set_num_threads(int arg);
extern void __kmp_push_current_task_to_thread(kmp_info_t *this_thr,
                                              kmp_team_t *team, int tid);
extern void __kmp_pop_current_task_from_thread(kmp_info_t *this_thr);
extern kmp_task_t *__kmp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
                                    kmp_tasking_flags_t *flags,
                                    size_t sizeof_kmp_task_t,
                                    size_t sizeof_shareds,
                                    kmp_routine_entry_t task_entry);
extern void __kmp_init_implicit_task(ident_t *loc_ref, kmp_info_t *this_thr,
                                     kmp_team_t *team, int tid,
                                     int set_curr_task);
extern void __kmp_finish_implicit_task(kmp_info_t *this_thr);
extern void __kmp_free_implicit_task(kmp_info_t *this_thr);

extern kmp_event_t *__kmpc_task_allow_completion_event(ident_t *loc_ref,
                                                       kmp_int32 gtid,
                                                       kmp_task_t *task);
extern void __kmp_fulfill_event(kmp_event_t *event);
extern void __kmp_free_task_team(kmp_info_t *thread,
                                 kmp_task_team_t *task_team);
extern void __kmp_reap_task_teams(void);
extern void __kmp_push_task_team_node(kmp_info_t *thread, kmp_team_t *team);
extern void __kmp_pop_task_team_node(kmp_info_t *thread, kmp_team_t *team);
extern void __kmp_wait_to_unref_task_teams(void);
extern void __kmp_task_team_setup(kmp_info_t *this_thr, kmp_team_t *team);
extern void __kmp_task_team_sync(kmp_info_t *this_thr, kmp_team_t *team);
extern void __kmp_task_team_wait(kmp_info_t *this_thr, kmp_team_t *team,
                                 int wait = 1);
extern void __kmp_tasking_barrier(kmp_team_t *team, kmp_info_t *thread,
                                  int gtid);

#ifdef KMP_DEBUG
#define KMP_DEBUG_ASSERT_TASKTEAM_INVARIANT(team, thr)                         \
  KMP_DEBUG_ASSERT(                                                            \
      __kmp_tasking_mode != tskm_task_teams || team->t.t_nproc == 1 ||         \
      thr->th.th_task_team == team->t.t_task_team[thr->th.th_task_state])
#else
#define KMP_DEBUG_ASSERT_TASKTEAM_INVARIANT(team, thr) /* Nothing */
#endif
extern int __kmp_is_address_mapped(void *addr);
extern kmp_uint64 __kmp_hardware_timestamp(void);

extern int __kmp_read_from_file(char const *path, char const *format, ...);

extern int __kmp_invoke_microtask(microtask_t pkfn, int gtid, int npr, int argc,
                                  void *argv[], void **exit_frame_ptr);

KMP_EXPORT void __kmpc_threadprivate_register_vec(ident_t *, void *data,
                                                  kmpc_ctor_vec ctor,
                                                  kmpc_cctor_vec cctor,
                                                  kmpc_dtor_vec dtor,
                                                  size_t vector_length);

KMP_EXPORT void *__kmpc_threadprivate(ident_t *, kmp_int32 global_tid,
                                      void *data, size_t size);
KMP_EXPORT void __kmpc_critical(ident_t *, kmp_int32 global_tid,
                                kmp_critical_name *);
KMP_EXPORT void __kmpc_end_critical(ident_t *, kmp_int32 global_tid,
                                    kmp_critical_name *);
KMP_EXPORT void __kmpc_critical_with_hint(ident_t *, kmp_int32 global_tid,
                                          kmp_critical_name *, uint32_t hint);

KMP_EXPORT kmp_int32 __kmpc_sections_init(ident_t *loc, kmp_int32 global_tid);
KMP_EXPORT kmp_int32 __kmpc_next_section(ident_t *loc, kmp_int32 global_tid,
                                         kmp_int32 numberOfSections);

KMP_EXPORT void KMPC_FOR_STATIC_INIT(ident_t *loc, kmp_int32 global_tid,
                                     kmp_int32 schedtype, kmp_int32 *plastiter,
                                     kmp_int *plower, kmp_int *pupper,
                                     kmp_int *pstride, kmp_int incr,
                                     kmp_int chunk);

KMP_EXPORT void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
                                   size_t cpy_size, void *cpy_data,
                                   void (*cpy_func)(void *, void *),
                                   kmp_int32 didit);

extern void KMPC_SET_NUM_THREADS(int arg);
extern void KMPC_SET_DYNAMIC(int flag);
extern void KMPC_SET_NESTED(int flag);
KMP_EXPORT kmp_int32 __kmpc_omp_task(ident_t *loc_ref, kmp_int32 gtid,
                                     kmp_task_t *new_task);
KMP_EXPORT kmp_task_t *__kmpc_omp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
                                             kmp_int32 flags,
                                             size_t sizeof_kmp_task_t,
                                             size_t sizeof_shareds,
                                             kmp_routine_entry_t task_entry);
KMP_EXPORT kmp_task_t *__kmpc_omp_target_task_alloc(
    ident_t *loc_ref, kmp_int32 gtid, kmp_int32 flags, size_t sizeof_kmp_task_t,
    size_t sizeof_shareds, kmp_routine_entry_t task_entry, kmp_int64 device_id);
KMP_EXPORT void __kmpc_omp_task_begin_if0(ident_t *loc_ref, kmp_int32 gtid,
                                          kmp_task_t *task);
KMP_EXPORT void __kmpc_omp_task_complete_if0(ident_t *loc_ref, kmp_int32 gtid,
                                             kmp_task_t *task);
KMP_EXPORT kmp_int32 __kmpc_omp_task_parts(ident_t *loc_ref, kmp_int32 gtid,
                                           kmp_task_t *new_task);
KMP_EXPORT kmp_int32 __kmpc_omp_taskwait(ident_t *loc_ref, kmp_int32 gtid);
KMP_EXPORT kmp_int32 __kmpc_omp_taskyield(ident_t *loc_ref, kmp_int32 gtid,
                                          int end_part);

void __kmpc_omp_task_begin(ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *task);
void __kmpc_omp_task_complete(ident_t *loc_ref, kmp_int32 gtid,
                              kmp_task_t *task);
KMP_EXPORT void __kmpc_taskgroup(ident_t *loc, int gtid);
KMP_EXPORT void __kmpc_end_taskgroup(ident_t *loc, int gtid);

KMP_EXPORT kmp_int32 __kmpc_omp_task_with_deps(
    ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *new_task, kmp_int32 ndeps,
    kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
    kmp_depend_info_t *noalias_dep_list);

KMP_EXPORT kmp_base_depnode_t *__kmpc_task_get_depnode(kmp_task_t *task);

KMP_EXPORT kmp_depnode_list_t *__kmpc_task_get_successors(kmp_task_t *task);

KMP_EXPORT void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32 gtid,
                                     kmp_int32 ndeps,
                                     kmp_depend_info_t *dep_list,
                                     kmp_int32 ndeps_noalias,
                                     kmp_depend_info_t *noalias_dep_list);

KMP_EXPORT void __kmpc_omp_taskwait_deps_51(ident_t *loc_ref, kmp_int32 gtid,
                                            kmp_int32 ndeps,
                                            kmp_depend_info_t *dep_list,
                                            kmp_int32 ndeps_noalias,
                                            kmp_depend_info_t *noalias_dep_list,
                                            kmp_int32 has_no_wait);

extern kmp_int32 __kmp_omp_task(kmp_int32 gtid, kmp_task_t *new_task,
                                bool serialize_immediate);
KMP_EXPORT kmp_int32 __kmpc_cancel(ident_t *loc_ref, kmp_int32 gtid,
                                   kmp_int32 cncl_kind);
KMP_EXPORT kmp_int32 __kmpc_cancellationpoint(ident_t *loc_ref, kmp_int32 gtid,
                                              kmp_int32 cncl_kind);
KMP_EXPORT kmp_int32 __kmpc_cancel_barrier(ident_t *loc_ref, kmp_int32 gtid);
KMP_EXPORT int __kmp_get_cancellation_status(int cancel_kind);

KMP_EXPORT void __kmpc_taskloop(ident_t *loc, kmp_int32 gtid, kmp_task_t *task,
                                kmp_int32 if_val, kmp_uint64 *lb,
                                kmp_uint64 *ub, kmp_int64 st, kmp_int32 nogroup,
                                kmp_int32 sched, kmp_uint64 grainsize,
                                void *task_dup);
KMP_EXPORT void __kmpc_taskloop_5(ident_t *loc, kmp_int32 gtid,
                                  kmp_task_t *task, kmp_int32 if_val,
                                  kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st,
                                  kmp_int32 nogroup, kmp_int32 sched,
                                  kmp_uint64 grainsize, kmp_int32 modifier,
                                  void *task_dup);

KMP_EXPORT void *__kmpc_task_reduction_modifier_init(ident_t *loc, int gtid,
                                                     int is_ws, int num,
                                                     void *data);
KMP_EXPORT kmp_int32 __kmpc_omp_reg_task_with_affinity(
    ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *new_task, kmp_int32 naffins,
    kmp_task_affinity_info_t *affin_list);
KMP_EXPORT void __kmp_set_num_teams(int num_teams);
KMP_EXPORT int __kmp_get_max_teams(void);
KMP_EXPORT void __kmp_set_teams_thread_limit(int limit);
KMP_EXPORT int __kmp_get_teams_thread_limit(void);
KMP_EXPORT void __kmpc_init_lock(ident_t *loc, kmp_int32 gtid,
                                 void **user_lock);
KMP_EXPORT void __kmpc_init_nest_lock(ident_t *loc, kmp_int32 gtid,
                                      void **user_lock);
KMP_EXPORT void __kmpc_destroy_lock(ident_t *loc, kmp_int32 gtid,
                                    void **user_lock);
KMP_EXPORT void __kmpc_destroy_nest_lock(ident_t *loc, kmp_int32 gtid,
                                         void **user_lock);
KMP_EXPORT void __kmpc_set_lock(ident_t *loc, kmp_int32 gtid,
                                void **user_lock);
KMP_EXPORT void __kmpc_set_nest_lock(ident_t *loc, kmp_int32 gtid,
                                     void **user_lock);
KMP_EXPORT void __kmpc_unset_lock(ident_t *loc, kmp_int32 gtid,
                                  void **user_lock);
KMP_EXPORT void __kmpc_unset_nest_lock(ident_t *loc, kmp_int32 gtid,
                                       void **user_lock);
KMP_EXPORT int __kmpc_test_lock(ident_t *loc, kmp_int32 gtid,
                                void **user_lock);
KMP_EXPORT int __kmpc_test_nest_lock(ident_t *loc, kmp_int32 gtid,
                                     void **user_lock);

KMP_EXPORT void __kmpc_init_lock_with_hint(ident_t *loc, kmp_int32 gtid,
                                           void **user_lock, uintptr_t hint);
KMP_EXPORT void __kmpc_init_nest_lock_with_hint(ident_t *loc, kmp_int32 gtid,
                                                void **user_lock,
                                                uintptr_t hint);
static inline bool __kmp_tdg_is_recording(kmp_tdg_status_t status) {
  return status == KMP_TDG_RECORDING;
}

KMP_EXPORT kmp_int32 __kmpc_start_record_task(ident_t *loc, kmp_int32 gtid,
                                              kmp_int32 input_flags,
                                              kmp_int32 tdg_id);
KMP_EXPORT void __kmpc_end_record_task(ident_t *loc, kmp_int32 gtid,
                                       kmp_int32 input_flags, kmp_int32 tdg_id);
KMP_EXPORT kmp_int32 __kmpc_reduce_nowait(
    ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size,
    void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
    kmp_critical_name *lck);
KMP_EXPORT void __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
                                         kmp_critical_name *lck);
KMP_EXPORT kmp_int32 __kmpc_reduce(
    ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size,
    void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
    kmp_critical_name *lck);
KMP_EXPORT void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
                                  kmp_critical_name *lck);

extern PACKED_REDUCTION_METHOD_T __kmp_determine_reduction_method(
    ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size,
    void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
    kmp_critical_name *lck);

KMP_EXPORT kmp_int32 __kmp_get_reduce_method(void);

KMP_EXPORT kmp_uint64 __kmpc_get_taskid();
KMP_EXPORT kmp_uint64 __kmpc_get_parent_taskid();
KMP_EXPORT void __kmpc_pop_num_threads(ident_t *loc, kmp_int32 global_tid);
KMP_EXPORT void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
                                        kmp_int32 num_threads);
KMP_EXPORT void __kmpc_push_num_threads_strict(ident_t *loc,
                                               kmp_int32 global_tid,
                                               kmp_int32 num_threads,
                                               int severity,
                                               const char *message);
KMP_EXPORT void __kmpc_push_num_threads_list(ident_t *loc,
                                             kmp_int32 global_tid,
                                             kmp_uint32 list_length,
                                             kmp_int32 *num_threads_list);
KMP_EXPORT void __kmpc_push_num_threads_list_strict(
    ident_t *loc, kmp_int32 global_tid, kmp_uint32 list_length,
    kmp_int32 *num_threads_list, int severity, const char *message);

KMP_EXPORT void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
                                      int proc_bind);
KMP_EXPORT void __kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid,
                                      kmp_int32 num_teams,
                                      kmp_int32 num_threads);
KMP_EXPORT void __kmpc_set_thread_limit(ident_t *loc, kmp_int32 global_tid,
                                        kmp_int32 thread_limit);
KMP_EXPORT void __kmpc_push_num_teams_51(ident_t *loc, kmp_int32 global_tid,
                                         kmp_int32 num_teams_lb,
                                         kmp_int32 num_teams_ub,
                                         kmp_int32 num_threads);
KMP_EXPORT void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid,
                                     kmp_int32 num_dims,
                                     const struct kmp_dim *dims);
KMP_EXPORT void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid,
                                     const kmp_int64 *vec);
KMP_EXPORT void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid,
                                     const kmp_int64 *vec);
KMP_EXPORT void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid);
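// Lowering sketch (illustrative): a one-dimensional "#pragma omp for
// ordered(1)" loop with a dependence on iteration i-1 maps onto the doacross
// API roughly as follows (body() is a hypothetical loop body):
#if 0
struct kmp_dim dim = {/*lo=*/0, /*up=*/N - 1, /*st=*/1};
__kmpc_doacross_init(loc, gtid, /*num_dims=*/1, &dim);
for (kmp_int64 i = lb; i <= ub; ++i) {
  kmp_int64 vec = i - 1;
  __kmpc_doacross_wait(loc, gtid, &vec); // wait for iteration i-1 to post
  body(i);
  vec = i;
  __kmpc_doacross_post(loc, gtid, &vec); // publish iteration i
}
__kmpc_doacross_fini(loc, gtid);
#endif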
KMP_EXPORT void *__kmpc_threadprivate_cached(ident_t *loc, kmp_int32 global_tid,
                                             void *data, size_t size,
                                             void ***cache);

void kmp_threadprivate_insert_private_data(int gtid, void *pc_addr,
                                           void *data_addr, size_t pc_size);
struct private_common *kmp_threadprivate_insert(int gtid, void *pc_addr,
                                                void *data_addr,
                                                size_t pc_size);
void __kmp_threadprivate_resize_cache(int newCapacity);
void __kmp_cleanup_threadprivate_caches();
4538#define KMPC_CONVENTION __cdecl
4540#define KMPC_CONVENTION
typedef enum omp_sched_t {
  omp_sched_static = 1,
  omp_sched_dynamic = 2,
  omp_sched_guided = 3,
  omp_sched_auto = 4
} omp_sched_t;
4550typedef void *kmp_affinity_mask_t;
KMP_EXPORT void KMPC_CONVENTION ompc_set_max_active_levels(int);
KMP_EXPORT void KMPC_CONVENTION ompc_set_schedule(omp_sched_t, int);
KMP_EXPORT int KMPC_CONVENTION ompc_get_ancestor_thread_num(int);
KMP_EXPORT int KMPC_CONVENTION ompc_get_team_size(int);
KMP_EXPORT int KMPC_CONVENTION
kmpc_set_affinity_mask_proc(int, kmp_affinity_mask_t *);
KMP_EXPORT int KMPC_CONVENTION
kmpc_unset_affinity_mask_proc(int, kmp_affinity_mask_t *);
KMP_EXPORT int KMPC_CONVENTION
kmpc_get_affinity_mask_proc(int, kmp_affinity_mask_t *);

KMP_EXPORT void KMPC_CONVENTION kmpc_set_stacksize(int);
KMP_EXPORT void KMPC_CONVENTION kmpc_set_stacksize_s(size_t);
KMP_EXPORT void KMPC_CONVENTION kmpc_set_library(int);
KMP_EXPORT void KMPC_CONVENTION kmpc_set_defaults(char const *);
KMP_EXPORT void KMPC_CONVENTION kmpc_set_disp_num_buffers(int);

void KMP_EXPAND_NAME(ompc_set_affinity_format)(char const *format);
size_t KMP_EXPAND_NAME(ompc_get_affinity_format)(char *buffer, size_t size);
void KMP_EXPAND_NAME(ompc_display_affinity)(char const *format);
size_t KMP_EXPAND_NAME(ompc_capture_affinity)(char *buffer, size_t buf_size,
                                              char const *format);
enum kmp_target_offload_kind {
  tgt_disabled = 0,
  tgt_default = 1,
  tgt_mandatory = 2
};
4580typedef enum kmp_target_offload_kind kmp_target_offload_kind_t;
4582extern kmp_target_offload_kind_t __kmp_target_offload;
4583extern int __kmpc_get_target_offload();
4586#define KMP_DEVICE_DEFAULT -1
4587#define KMP_DEVICE_ALL -11
typedef enum kmp_pause_status_t {
  kmp_not_paused = 0,
  kmp_soft_paused = 1,
  kmp_hard_paused = 2,
  kmp_stop_tool_paused = 3
} kmp_pause_status_t;
4601extern kmp_pause_status_t __kmp_pause_status;
4602extern int __kmpc_pause_resource(kmp_pause_status_t level);
4603extern int __kmp_pause_resource(kmp_pause_status_t level);
4605extern void __kmp_resume_if_soft_paused();
static inline void __kmp_resume_if_hard_paused() {
  if (__kmp_pause_status == kmp_hard_paused) {
    __kmp_pause_status = kmp_not_paused;
  }
}

extern void __kmp_omp_display_env(int verbose);
4618extern volatile int __kmp_init_hidden_helper;
4620extern volatile int __kmp_hidden_helper_team_done;
4622extern kmp_int32 __kmp_enable_hidden_helper;
4624extern kmp_info_t *__kmp_hidden_helper_main_thread;
4626extern kmp_info_t **__kmp_hidden_helper_threads;
4628extern kmp_int32 __kmp_hidden_helper_threads_num;
4630extern std::atomic<kmp_int32> __kmp_unexecuted_hidden_helper_tasks;
4632extern void __kmp_hidden_helper_initialize();
4633extern void __kmp_hidden_helper_threads_initz_routine();
4634extern void __kmp_do_initialize_hidden_helper_threads();
4635extern void __kmp_hidden_helper_threads_initz_wait();
4636extern void __kmp_hidden_helper_initz_release();
4637extern void __kmp_hidden_helper_threads_deinitz_wait();
4638extern void __kmp_hidden_helper_threads_deinitz_release();
4639extern void __kmp_hidden_helper_main_thread_wait();
4640extern void __kmp_hidden_helper_worker_thread_wait();
4641extern void __kmp_hidden_helper_worker_thread_signal();
4642extern void __kmp_hidden_helper_main_thread_release();
4645#define KMP_HIDDEN_HELPER_THREAD(gtid) \
4646 ((gtid) >= 1 && (gtid) <= __kmp_hidden_helper_threads_num)
4648#define KMP_HIDDEN_HELPER_WORKER_THREAD(gtid) \
4649 ((gtid) > 1 && (gtid) <= __kmp_hidden_helper_threads_num)
4651#define KMP_HIDDEN_HELPER_MAIN_THREAD(gtid) \
4652 ((gtid) == 1 && (gtid) <= __kmp_hidden_helper_threads_num)
4654#define KMP_HIDDEN_HELPER_TEAM(team) \
4655 (team->t.t_threads[0] == __kmp_hidden_helper_main_thread)
4659#define KMP_GTID_TO_SHADOW_GTID(gtid) \
4660 ((gtid) % (__kmp_hidden_helper_threads_num - 1) + 2)
static inline int __kmp_adjust_gtid_for_hidden_helpers(int gtid) {
  int adjusted_gtid = gtid;
  if (__kmp_hidden_helper_threads_num > 0 && gtid > 0 &&
      gtid - __kmp_hidden_helper_threads_num >= 0) {
    adjusted_gtid -= __kmp_hidden_helper_threads_num;
  }
  return adjusted_gtid;
}
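// Worked example (illustrative, assuming __kmp_hidden_helper_threads_num is
// 8): gtids 1..8 belong to hidden helpers, so a regular worker registered at
// gtid 10 is reported to the user as 10 - 8 = 2, while gtid 0 (the initial
// thread) is left unchanged.
#if 0
int user_gtid = __kmp_adjust_gtid_for_hidden_helpers(10); // == 2
#endif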
typedef enum kmp_severity_t {
  severity_warning = 1,
  severity_fatal = 2
} kmp_severity_t;
extern void __kmpc_error(ident_t *loc, int severity, const char *message);
KMP_EXPORT void __kmpc_scope(ident_t *loc, kmp_int32 gtid, void *reserved);
KMP_EXPORT void __kmpc_end_scope(ident_t *loc, kmp_int32 gtid, void *reserved);
template <bool C, bool S>
extern void __kmp_suspend_32(int th_gtid, kmp_flag_32<C, S> *flag);
template <bool C, bool S>
extern void __kmp_suspend_64(int th_gtid, kmp_flag_64<C, S> *flag);
template <bool C, bool S>
extern void __kmp_atomic_suspend_64(int th_gtid,
                                    kmp_atomic_flag_64<C, S> *flag);
extern void __kmp_suspend_oncore(int th_gtid, kmp_flag_oncore *flag);
#if KMP_HAVE_MWAIT || KMP_HAVE_UMWAIT
template <bool C, bool S>
extern void __kmp_mwait_32(int th_gtid, kmp_flag_32<C, S> *flag);
template <bool C, bool S>
extern void __kmp_mwait_64(int th_gtid, kmp_flag_64<C, S> *flag);
template <bool C, bool S>
extern void __kmp_atomic_mwait_64(int th_gtid, kmp_atomic_flag_64<C, S> *flag);
extern void __kmp_mwait_oncore(int th_gtid, kmp_flag_oncore *flag);
#endif
template <bool C, bool S>
extern void __kmp_resume_32(int target_gtid, kmp_flag_32<C, S> *flag);
template <bool C, bool S>
extern void __kmp_resume_64(int target_gtid, kmp_flag_64<C, S> *flag);
template <bool C, bool S>
extern void __kmp_atomic_resume_64(int target_gtid,
                                   kmp_atomic_flag_64<C, S> *flag);
extern void __kmp_resume_oncore(int target_gtid, kmp_flag_oncore *flag);

template <bool C, bool S>
int __kmp_execute_tasks_32(kmp_info_t *thread, kmp_int32 gtid,
                           kmp_flag_32<C, S> *flag, int final_spin,
                           int *thread_finished, kmp_int32 is_constrained);
template <bool C, bool S>
int __kmp_execute_tasks_64(kmp_info_t *thread, kmp_int32 gtid,
                           kmp_flag_64<C, S> *flag, int final_spin,
                           int *thread_finished, kmp_int32 is_constrained);
template <bool C, bool S>
int __kmp_atomic_execute_tasks_64(kmp_info_t *thread, kmp_int32 gtid,
                                  kmp_atomic_flag_64<C, S> *flag,
                                  int final_spin, int *thread_finished,
                                  kmp_int32 is_constrained);
int __kmp_execute_tasks_oncore(kmp_info_t *thread, kmp_int32 gtid,
                               kmp_flag_oncore *flag, int final_spin,
                               int *thread_finished, kmp_int32 is_constrained);
4747extern int __kmp_nesting_mode;
4748extern int __kmp_nesting_mode_nlevels;
4749extern int *__kmp_nesting_nth_level;
4750extern void __kmp_init_nesting_mode();
4751extern void __kmp_set_nesting_mode_threads();
// Safe C interface to FILE* — the destructor automatically closes the file.
class kmp_safe_raii_file_t {
  FILE *f;
  void close() {
    if (f && f != stdout && f != stderr) {
      fclose(f);
      f = nullptr;
    }
  }

public:
  kmp_safe_raii_file_t() : f(nullptr) {}
  kmp_safe_raii_file_t(const char *filename, const char *mode,
                       const char *env_var = nullptr)
      : f(nullptr) {
    open(filename, mode, env_var);
  }
  kmp_safe_raii_file_t(const kmp_safe_raii_file_t &other) = delete;
  kmp_safe_raii_file_t &operator=(const kmp_safe_raii_file_t &other) = delete;
  ~kmp_safe_raii_file_t() { close(); }

  /// Open filename using mode. This is automatically closed in the destructor.
  /// The env var is used to indicate the env var that opened the file.
  void open(const char *filename, const char *mode,
            const char *env_var = nullptr) {
    KMP_ASSERT(!f);
    f = fopen(filename, mode);
    if (!f) {
      int code = errno;
      if (env_var) {
        __kmp_fatal(KMP_MSG(CantOpenFileForReading, filename), KMP_ERR(code),
                    KMP_HNT(CheckEnvVar, env_var, filename), __kmp_msg_null);
      } else {
        __kmp_fatal(KMP_MSG(CantOpenFileForReading, filename), KMP_ERR(code),
                    __kmp_msg_null);
      }
    }
  }
  /// Instead of erroring out, return non-zero when fopen() fails.
  int try_open(const char *filename, const char *mode) {
    KMP_ASSERT(!f);
    f = fopen(filename, mode);
    if (!f)
      return errno;
    return 0;
  }

  operator bool() { return bool(f); }
  operator FILE *() { return f; }
};
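// Usage sketch (illustrative; the file name and env var below are made up):
// RAII guarantees the FILE* is closed on every exit path, and close()
// deliberately never fclose()s stdout or stderr.
#if 0
kmp_safe_raii_file_t file("stats.txt", "w", "KMP_STATS_FILE"); // fatal on error
fprintf(file, "hello\n"); // implicit conversion to FILE *
// file.try_open(name, mode) returns errno instead of aborting
#endif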
// To be used in static_cast-style conversions with bounds checking.
template <typename SourceType, typename TargetType,
          bool isSourceSmaller = (sizeof(SourceType) < sizeof(TargetType)),
          bool isSourceEqual = (sizeof(SourceType) == sizeof(TargetType)),
          bool isSourceSigned = std::is_signed<SourceType>::value,
          bool isTargetSigned = std::is_signed<TargetType>::value>
struct kmp_convert {};

// Both types are signed; Source smaller
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, true, false, true, true> {
  static TargetType to(SourceType src) { return (TargetType)src; }
};
// Source equal
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, true, true, true> {
  static TargetType to(SourceType src) { return src; }
};
// Source bigger
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, false, true, true> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src <= static_cast<SourceType>(
                          (std::numeric_limits<TargetType>::max)()));
    KMP_ASSERT(src >= static_cast<SourceType>(
                          (std::numeric_limits<TargetType>::min)()));
    return (TargetType)src;
  }
};

// Source signed, Target unsigned
// Source smaller
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, true, false, true, false> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src >= 0);
    return (TargetType)src;
  }
};
// Source equal
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, true, true, false> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src >= 0);
    return (TargetType)src;
  }
};
// Source bigger
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, false, true, false> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src >= 0);
    KMP_ASSERT(src <= static_cast<SourceType>(
                          (std::numeric_limits<TargetType>::max)()));
    return (TargetType)src;
  }
};

// Source unsigned, Target signed
// Source smaller
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, true, false, false, true> {
  static TargetType to(SourceType src) { return (TargetType)src; }
};
// Source equal
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, true, false, true> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src <= static_cast<SourceType>(
                          (std::numeric_limits<TargetType>::max)()));
    return (TargetType)src;
  }
};
// Source bigger
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, false, false, true> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src <= static_cast<SourceType>(
                          (std::numeric_limits<TargetType>::max)()));
    return (TargetType)src;
  }
};

// Both types are unsigned
// Source smaller
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, true, false, false, false> {
  static TargetType to(SourceType src) { return (TargetType)src; }
};
// Source equal
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, true, false, false> {
  static TargetType to(SourceType src) { return src; }
};
// Source bigger
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, false, false, false> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src <= static_cast<SourceType>(
                          (std::numeric_limits<TargetType>::max)()));
    return (TargetType)src;
  }
};

template <typename T1, typename T2>
static inline void __kmp_type_convert(T1 src, T2 *dest) {
  *dest = kmp_convert<T1, T2>::to(src);
}
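// Usage sketch (illustrative): kmp_convert selects a specialization from the
// relative sizes and signedness at compile time, asserting only on the
// potentially lossy directions.
#if 0
kmp_int64 big = 42;
kmp_int32 small;
__kmp_type_convert(big, &small); // asserts 42 fits in kmp_int32
size_t n = 100;
int m;
__kmp_type_convert(n, &m); // unsigned -> signed: asserts n <= INT_MAX
#endif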