tvl-depot/absl/synchronization/internal/create_thread_identity.cc
Abseil Team e96ae2203b Export of internal Abseil changes
--
074a799119ac881b8b8ce59ef7a3166d1aa025ac by Tom Manshreck <shreck@google.com>:

nit: Add return info for StrCat

PiperOrigin-RevId: 278647298

--
d58a2a39ab6f50266cc695506ba2e86bdb45d795 by Mark Barolak <mbar@google.com>:

Stop suppressing no-nested-anon-types warnings because there aren't actually any warnings to suppress.

PiperOrigin-RevId: 278440548

--
445051bd280b9a6f608a8c80b3d7cafcc1377a03 by Abseil Team <absl-team@google.com>:

ResetThreadIdentity does not need to clear identity->waiter_state.

ResetThreadIdentity is only called by NewThreadIdentity. NewThreadIdentity is
only called by CreateThreadIdentity. CreateThreadIdentity calls
PerThreadSem::Init, which initializes identity->waiter_state, immediately after
calling NewThreadIdentity. Therefore ResetThreadIdentity does not need to clear
identity->waiter_state.
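As a sketch of that call order (paraphrasing create_thread_identity.cc as it appears below; PerThreadSem is declared in per_thread_sem.h):

    base_internal::ThreadIdentity* CreateThreadIdentity() {
      base_internal::ThreadIdentity* identity = NewThreadIdentity();  // runs ResetThreadIdentity()
      PerThreadSem::Init(identity);  // initializes identity->waiter_state
      base_internal::SetCurrentThreadIdentity(identity, ReclaimThreadIdentity);
      return identity;
    }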

PiperOrigin-RevId: 278429844

--
c2079b664d92be40d5e365abcca4e9b3505a75a6 by Abseil Team <absl-team@google.com>:

Delete the f->header.magic check in LowLevelAlloc::Free().

The f->header.magic check in LowLevelAlloc::Free() is redundant, because
AddToFreeList() will immediately perform the same check.

Also fix a typo in the comment that documents the lock requirements for
Next(). The comment should say "L >= arena->mu", which is equivalent to
EXCLUSIVE_LOCKS_REQUIRED(arena->mu).

NOTE: LowLevelAlloc::Free() performs the f->header.magic check without
holding the arena lock. This may have caused the TSAN data race warning
reported in bug 143697235.
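For readers unfamiliar with the comment shorthand: "L >= arena->mu" on an internal helper means the caller must already hold arena->mu, i.e. the same contract that the attribute states. A hypothetical illustration (the function name and signature here are invented for this note, not copied from low_level_alloc.cc):

    // L >= arena->mu (caller must already hold the arena lock).
    static void CoalesceNeighbors(void* block, LowLevelAlloc::Arena* arena)
        EXCLUSIVE_LOCKS_REQUIRED(arena->mu);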

PiperOrigin-RevId: 278414140

--
5534f35ce677165700117d868f51607ed1f0d73b by Greg Falcon <gfalcon@google.com>:

Add an internal (unsupported) PiecewiseCombiner class to allow hashing buffers piecewise.
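
A rough usage sketch of what "hashing buffers piecewise" means (the namespace and method names below are assumptions made for this note; the class is internal, unsupported, and its API may differ):

    // Sketch: hash an input that arrives in two pieces; the intent is that the
    // result matches hashing the concatenated bytes in a single call.
    template <typename H>
    H HashTwoPieces(H state, absl::string_view part1, absl::string_view part2) {
      absl::hash_internal::PiecewiseCombiner combiner;
      state = combiner.add_buffer(std::move(state), part1.data(), part1.size());
      state = combiner.add_buffer(std::move(state), part2.data(), part2.size());
      return combiner.finalize(std::move(state));
    }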

PiperOrigin-RevId: 278388902
GitOrigin-RevId: 074a799119ac881b8b8ce59ef7a3166d1aa025ac
Change-Id: I61734850cbbb01c7585e8c736a5bb56e416512a8
2019-11-05 16:41:17 -05:00

// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <stdint.h>
#include <new>

// This file is a no-op if the required LowLevelAlloc support is missing.
#include "absl/base/internal/low_level_alloc.h"
#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING

#include <string.h>

#include "absl/base/attributes.h"
#include "absl/base/internal/spinlock.h"
#include "absl/base/internal/thread_identity.h"
#include "absl/synchronization/internal/per_thread_sem.h"

namespace absl {
namespace synchronization_internal {

// ThreadIdentity storage is persistent; we maintain a free-list of previously
// released ThreadIdentity objects.
static base_internal::SpinLock freelist_lock(
    base_internal::kLinkerInitialized);
static base_internal::ThreadIdentity* thread_identity_freelist;

// A per-thread destructor for reclaiming associated ThreadIdentity objects.
// Since we must preserve their storage, we cache them for re-use.
void ReclaimThreadIdentity(void* v) {
  base_internal::ThreadIdentity* identity =
      static_cast<base_internal::ThreadIdentity*>(v);

  // all_locks might have been allocated by the Mutex implementation.
  // We free it here when we are notified that our thread is dying.
  if (identity->per_thread_synch.all_locks != nullptr) {
    base_internal::LowLevelAlloc::Free(identity->per_thread_synch.all_locks);
  }

  PerThreadSem::Destroy(identity);

  // We must explicitly clear the current thread's identity:
  // (a) Subsequent (unrelated) per-thread destructors may require an identity.
  //     We must guarantee a new identity is used in this case (this destructor
  //     will be reinvoked up to PTHREAD_DESTRUCTOR_ITERATIONS times in this
  //     case).
  // (b) ThreadIdentity implementations may depend on memory that is not
  //     reinitialized before reuse. We must allow explicit clearing of the
  //     association state in this case.
  base_internal::ClearCurrentThreadIdentity();
  {
    base_internal::SpinLockHolder l(&freelist_lock);
    identity->next = thread_identity_freelist;
    thread_identity_freelist = identity;
  }
}

// Return value rounded up to next multiple of align.
// Align must be a power of two.
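// For example, RoundUp(13, 8) == (13 + 7) & ~7 == 16, and RoundUp(16, 8) == 16.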
static intptr_t RoundUp(intptr_t addr, intptr_t align) {
  return (addr + align - 1) & ~(align - 1);
}

static void ResetThreadIdentity(base_internal::ThreadIdentity* identity) {
  base_internal::PerThreadSynch* pts = &identity->per_thread_synch;
  pts->next = nullptr;
  pts->skip = nullptr;
  pts->may_skip = false;
  pts->waitp = nullptr;
  pts->suppress_fatal_errors = false;
  pts->readers = 0;
  pts->priority = 0;
  pts->next_priority_read_cycles = 0;
  pts->state.store(base_internal::PerThreadSynch::State::kAvailable,
                   std::memory_order_relaxed);
  pts->maybe_unlocking = false;
  pts->wake = false;
  pts->cond_waiter = false;
  pts->all_locks = nullptr;
  identity->blocked_count_ptr = nullptr;
  identity->ticker.store(0, std::memory_order_relaxed);
  identity->wait_start.store(0, std::memory_order_relaxed);
  identity->is_idle.store(false, std::memory_order_relaxed);
  identity->next = nullptr;
}

static base_internal::ThreadIdentity* NewThreadIdentity() {
  base_internal::ThreadIdentity* identity = nullptr;

  {
    // Re-use a previously released object if possible.
    base_internal::SpinLockHolder l(&freelist_lock);
    if (thread_identity_freelist) {
      identity = thread_identity_freelist;  // Take list-head.
      thread_identity_freelist = thread_identity_freelist->next;
    }
  }

  if (identity == nullptr) {
    // Allocate enough space to align ThreadIdentity to a multiple of
    // PerThreadSynch::kAlignment. This space is never released (it is
    // added to a freelist by ReclaimThreadIdentity instead).
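    // For example, if kAlignment were 64 and the allocation started at address
    // 0x1008, the rounded-up identity below would be placed at 0x1040 (the next
    // multiple of 64); the extra kAlignment - 1 bytes guarantee the object
    // still fits inside the allocation.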
    void* allocation = base_internal::LowLevelAlloc::Alloc(
        sizeof(*identity) + base_internal::PerThreadSynch::kAlignment - 1);
    // Round up the address to the required alignment.
    identity = reinterpret_cast<base_internal::ThreadIdentity*>(
        RoundUp(reinterpret_cast<intptr_t>(allocation),
                base_internal::PerThreadSynch::kAlignment));
  }
  ResetThreadIdentity(identity);

  return identity;
}

// Allocates and attaches a ThreadIdentity object for the calling thread.
// Returns the new identity.
// REQUIRES: CurrentThreadIdentity(false) == nullptr
base_internal::ThreadIdentity* CreateThreadIdentity() {
  base_internal::ThreadIdentity* identity = NewThreadIdentity();
  PerThreadSem::Init(identity);
  // Associate the value with the current thread, and attach our destructor.
  base_internal::SetCurrentThreadIdentity(identity, ReclaimThreadIdentity);
  return identity;
}
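
// Typical use (a sketch; the helper below is declared in
// create_thread_identity.h, and its exact body there may differ):
//
//   base_internal::ThreadIdentity* GetOrCreateCurrentThreadIdentity() {
//     base_internal::ThreadIdentity* identity =
//         base_internal::CurrentThreadIdentityIfPresent();
//     if (identity == nullptr) return CreateThreadIdentity();
//     return identity;
//   }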

}  // namespace synchronization_internal
}  // namespace absl

#endif  // ABSL_LOW_LEVEL_ALLOC_MISSING