[OpenMP] Remove unused variables from libomp code

Several variables were left unused as a result of different patches removing
their use.

Two variables still have some use:
`poll_count` is used by the KMP_BLOCKING macro, but only under certain
conditions; a (void) cast is added to tell the compiler to ignore the variable
when it is otherwise unused.

`padding` is a dummy stack allocation with no intent to ever be read; a (void)
cast is likewise added to make the compiler ignore the unused variable.

Differential Revision: https://reviews.llvm.org/D104303
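For reference, a minimal sketch of the `(void)`-cast idiom the patch applies to
`poll_count` and `padding` (illustrative code, not part of libomp;
`SKETCH_BLOCKING` and `wait_sketch` are made-up names standing in for the real
configuration macros and functions):

```cpp
#include <cstdint>

#ifndef SKETCH_BLOCKING
#define SKETCH_BLOCKING 0 // stand-in for the builds that actually use the counter
#endif

void wait_sketch() {
  std::uint64_t poll_count = 0;
#if SKETCH_BLOCKING
  ++poll_count; // only this configuration ever reads the counter
#endif
  (void)poll_count; // no-op; silences the unused-variable warning in other builds

  // Dummy buffer whose only job is to occupy stack space (libomp uses
  // KMP_ALLOCA for this); the value itself is intentionally never read.
  char padding[64];
  (void)padding;
}
```

The cast costs nothing at run time; it only marks the non-use as intentional so
configurations that compile out the consuming code stay warning-clean.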
Joachim Protze
2021-06-15 09:17:26 +02:00
parent 308f6a5245
commit cff215565e
8 changed files with 3 additions and 22 deletions


@@ -35,7 +35,7 @@ function(libomp_get_cxxflags cxxflags)
libomp_append(flags_local -Wno-stringop-truncation LIBOMP_HAVE_WNO_STRINGOP_TRUNCATION_FLAG)
libomp_append(flags_local -Wno-switch LIBOMP_HAVE_WNO_SWITCH_FLAG)
libomp_append(flags_local -Wno-uninitialized LIBOMP_HAVE_WNO_UNINITIALIZED_FLAG)
libomp_append(flags_local -Wno-unused-but-set-variable LIBOMP_HAVE_WNO_UNUSED_BUT_SET_VARIABLE_FLAG)
# libomp_append(flags_local -Wno-unused-but-set-variable LIBOMP_HAVE_WNO_UNUSED_BUT_SET_VARIABLE_FLAG)
libomp_append(flags_local -Wno-return-type-c-linkage LIBOMP_HAVE_WNO_RETURN_TYPE_C_LINKAGE_FLAG)
libomp_append(flags_local -Wno-cast-qual LIBOMP_HAVE_WNO_CAST_QUAL_FLAG)
libomp_append(flags_local -Wno-int-to-void-pointer-cast LIBOMP_HAVE_WNO_INT_TO_VOID_POINTER_CAST_FLAG)


@@ -2608,12 +2608,6 @@ restart_radix_check:
nCoresPerPkg = maxCt[coreIdIndex];
nPackages = totals[pkgIdIndex];
// Check to see if the machine topology is uniform
unsigned prod = totals[maxIndex];
for (index = threadIdIndex; index < maxIndex; index++) {
prod *= maxCt[index];
}
// When affinity is off, this routine will still be called to set
// __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
// Make sure all these vars are set correctly, and return now if affinity is


@@ -2519,12 +2519,6 @@ void __kmpc_destroy_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) {
if (!codeptr)
codeptr = OMPT_GET_RETURN_ADDRESS(0);
if (ompt_enabled.ompt_callback_lock_destroy) {
kmp_user_lock_p lck;
if (KMP_EXTRACT_D_TAG(user_lock) == 0) {
lck = ((kmp_indirect_lock_t *)KMP_LOOKUP_I_LOCK(user_lock))->lock;
} else {
lck = (kmp_user_lock_p)user_lock;
}
ompt_callbacks.ompt_callback(ompt_callback_lock_destroy)(
ompt_mutex_lock, (ompt_wait_id_t)(uintptr_t)user_lock, codeptr);
}


@@ -128,12 +128,10 @@ int FTN_STDCALL FTN_GET_BLOCKTIME(void) {
return __kmps_get_blocktime();
#else
int gtid, tid;
kmp_info_t *thread;
kmp_team_p *team;
gtid = __kmp_entry_gtid();
tid = __kmp_tid_from_gtid(gtid);
thread = __kmp_thread_from_gtid(gtid);
team = __kmp_threads[gtid]->th.th_team;
/* These must match the settings used in __kmp_wait_sleep() */


@@ -1091,7 +1091,6 @@ __kmp_acquire_queuing_lock_timed_template(kmp_queuing_lock_t *lck,
volatile kmp_int32 *head_id_p = &lck->lk.head_id;
volatile kmp_int32 *tail_id_p = &lck->lk.tail_id;
volatile kmp_uint32 *spin_here_p;
kmp_int32 need_mf = 1;
#if OMPT_SUPPORT
ompt_state_t prev_state = ompt_state_undefined;
@@ -1142,7 +1141,6 @@ __kmp_acquire_queuing_lock_timed_template(kmp_queuing_lock_t *lck,
if ( t > 0 ) condition in the enqueued case below, which is not
necessary for this state transition */
need_mf = 0;
/* try (-1,0)->(tid,tid) */
enqueued = KMP_COMPARE_AND_STORE_ACQ64((volatile kmp_int64 *)tail_id_p,
KMP_PACK_64(-1, 0),
@@ -1164,7 +1162,6 @@ __kmp_acquire_queuing_lock_timed_template(kmp_queuing_lock_t *lck,
if (tail == 0) {
enqueued = FALSE;
} else {
need_mf = 0;
/* try (h,t) or (h,h)->(h,tid) */
enqueued = KMP_COMPARE_AND_STORE_ACQ32(tail_id_p, tail, gtid + 1);


@@ -1133,7 +1133,6 @@ void __kmp_serialized_parallel(ident_t *loc, kmp_int32 global_tid) {
#if OMPT_SUPPORT
ompt_data_t ompt_parallel_data = ompt_data_none;
ompt_data_t *implicit_task_data;
void *codeptr = OMPT_LOAD_RETURN_ADDRESS(global_tid);
if (ompt_enabled.enabled &&
this_thr->th.ompt_thread_info.state != ompt_state_overhead) {
@@ -1326,7 +1325,6 @@ void __kmp_serialized_parallel(ident_t *loc, kmp_int32 global_tid) {
// don't use lw_taskteam after linking. content was swaped
/* OMPT implicit task begin */
implicit_task_data = OMPT_CUR_TASK_DATA(this_thr);
if (ompt_enabled.ompt_callback_implicit_task) {
ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
ompt_scope_begin, OMPT_CUR_TEAM_DATA(this_thr),


@@ -179,7 +179,6 @@ __kmp_wait_template(kmp_info_t *this_thr,
kmp_uint32 spins;
int th_gtid;
int tasks_completed = FALSE;
int oversubscribed;
#if !KMP_USE_MONITOR
kmp_uint64 poll_count;
kmp_uint64 hibernate_goal;
@@ -321,10 +320,10 @@ final_spin=FALSE)
} else
hibernate_goal = KMP_NOW() + this_thr->th.th_team_bt_intervals;
poll_count = 0;
(void)poll_count;
#endif // KMP_USE_MONITOR
}
oversubscribed = (TCR_4(__kmp_nth) > __kmp_avail_proc);
KMP_MB();
// Main wait spin loop


@@ -515,6 +515,7 @@ static void *__kmp_launch_worker(void *thr) {
KMP_OS_OPENBSD
if (__kmp_stkoffset > 0 && gtid > 0) {
padding = KMP_ALLOCA(gtid * __kmp_stkoffset);
(void)padding;
}
#endif