Rollback "Mutex: Remove destructor in release build"
PiperOrigin-RevId: 577180526
Change-Id: Iec53709456805ca8dc5327669cc0f6c95825d0e9
dvyukov authored and copybara-github committed Oct 27, 2023
1 parent cdb1e7f commit 89d2caa
Showing 3 changed files with 75 additions and 113 deletions.
150 changes: 74 additions & 76 deletions absl/synchronization/mutex.cc
@@ -202,23 +202,31 @@ int MutexDelay(int32_t c, int mode) {
 // Ensure that "(*pv & bits) == bits" by doing an atomic update of "*pv" to
 // "*pv | bits" if necessary. Wait until (*pv & wait_until_clear)==0
 // before making any change.
-// Returns true if bits were previously unset and set by the call.
 // This is used to set flags in mutex and condition variable words.
-static bool AtomicSetBits(std::atomic<intptr_t>* pv, intptr_t bits,
+static void AtomicSetBits(std::atomic<intptr_t>* pv, intptr_t bits,
                           intptr_t wait_until_clear) {
-  for (;;) {
-    intptr_t v = pv->load(std::memory_order_relaxed);
-    if ((v & bits) == bits) {
-      return false;
-    }
-    if ((v & wait_until_clear) != 0) {
-      continue;
-    }
-    if (pv->compare_exchange_weak(v, v | bits, std::memory_order_release,
-                                  std::memory_order_relaxed)) {
-      return true;
-    }
-  }
+  intptr_t v;
+  do {
+    v = pv->load(std::memory_order_relaxed);
+  } while ((v & bits) != bits &&
+           ((v & wait_until_clear) != 0 ||
+            !pv->compare_exchange_weak(v, v | bits, std::memory_order_release,
+                                       std::memory_order_relaxed)));
 }
+
+// Ensure that "(*pv & bits) == 0" by doing an atomic update of "*pv" to
+// "*pv & ~bits" if necessary. Wait until (*pv & wait_until_clear)==0
+// before making any change.
+// This is used to unset flags in mutex and condition variable words.
+static void AtomicClearBits(std::atomic<intptr_t>* pv, intptr_t bits,
+                            intptr_t wait_until_clear) {
+  intptr_t v;
+  do {
+    v = pv->load(std::memory_order_relaxed);
+  } while ((v & bits) != 0 &&
+           ((v & wait_until_clear) != 0 ||
+            !pv->compare_exchange_weak(v, v & ~bits, std::memory_order_release,
+                                       std::memory_order_relaxed)));
+}
 
 //------------------------------------------------------------------
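
Note: the pair of restored helpers above follow the standard compare-and-swap retry idiom, spinning while a designated lock bit is held. For illustration only, a minimal self-contained sketch of the same idiom outside Abseil; the function and variable names below are made up for this note and are not part of the commit:

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    // Set `bits` in *pv once the `wait_until_clear` bits are zero.
    // Same shape as the restored AtomicSetBits: reload, test, CAS, retry.
    static void SetBitsWhenClear(std::atomic<intptr_t>* pv, intptr_t bits,
                                 intptr_t wait_until_clear) {
      intptr_t v;
      do {
        v = pv->load(std::memory_order_relaxed);
      } while ((v & bits) != bits &&            // stop once all bits are set
               ((v & wait_until_clear) != 0 ||  // spin while lock bit is held
                !pv->compare_exchange_weak(v, v | bits,
                                           std::memory_order_release,
                                           std::memory_order_relaxed)));
    }

    int main() {
      std::atomic<intptr_t> word{0};
      SetBitsWhenClear(&word, /*bits=*/0x4, /*wait_until_clear=*/0x1);
      std::printf("word = %#llx\n",
                  static_cast<unsigned long long>(word.load()));
      return 0;
    }

The weak compare-exchange may fail spuriously, which is harmless here because the loop unconditionally reloads and retries.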
@@ -329,49 +337,12 @@ static SynchEvent* EnsureSynchEvent(std::atomic<intptr_t>* addr,
                                     const char* name, intptr_t bits,
                                     intptr_t lockbit) {
   uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
+  SynchEvent* e;
   // first look for existing SynchEvent struct..
   synch_event_mu.Lock();
-  // When a Mutex/CondVar is destroyed, we don't remove the associated
-  // SynchEvent to keep destructors empty in release builds for performance
-  // reasons. If the current call is the first to set bits (kMuEvent/kCVEvent),
-  // we don't look up the existing event because, if it exists, it must be for
-  // the previous Mutex/CondVar that lived at the same address.
-  // The leaked events should not be a problem for tests, which create a
-  // bounded number of events. And debug logging is not supposed to be enabled
-  // in production. However, if it's accidentally enabled, or briefly enabled
-  // for some debugging, we don't want to crash the program. Instead we drop
-  // all events if we have accumulated too many of them. A single event
-  // is ~48 bytes, so 100K events is ~5 MB.
-  // Additionally we could delete the old event for the same address,
-  // but that would require a better hashmap (if we accumulate too many
-  // events, the linked lists grow and traversing them becomes very slow).
-  constexpr size_t kMaxSynchEventCount = 100 << 10;
-  // Total number of live synch events.
-  static size_t synch_event_count ABSL_GUARDED_BY(synch_event_mu);
-  if (++synch_event_count > kMaxSynchEventCount) {
-    synch_event_count = 0;
-    ABSL_RAW_LOG(ERROR,
-                 "Accumulated %zu Mutex debug objects. If you see this"
-                 " in production, it may mean that the production code"
-                 " accidentally calls "
-                 "Mutex/CondVar::EnableDebugLog/EnableInvariantDebugging.",
-                 kMaxSynchEventCount);
-    for (auto*& head : synch_event) {
-      for (auto* e = head; e != nullptr;) {
-        SynchEvent* next = e->next;
-        if (--(e->refcount) == 0) {
-          base_internal::LowLevelAlloc::Free(e);
-        }
-        e = next;
-      }
-      head = nullptr;
-    }
-  }
-  SynchEvent* e = nullptr;
-  if (!AtomicSetBits(addr, bits, lockbit)) {
-    for (e = synch_event[h];
-         e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
-         e = e->next) {
-    }
-  }
+  for (e = synch_event[h];
+       e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
+       e = e->next) {
+  }
   if (e == nullptr) {  // no SynchEvent struct found; make one.
     if (name == nullptr) {
@@ -387,6 +358,7 @@ static SynchEvent* EnsureSynchEvent(std::atomic<intptr_t>* addr,
     e->log = false;
     strcpy(e->name, name);  // NOLINT(runtime/printf)
     e->next = synch_event[h];
+    AtomicSetBits(addr, bits, lockbit);
     synch_event[h] = e;
   } else {
     e->refcount++;  // for return value
@@ -395,18 +367,48 @@ static SynchEvent* EnsureSynchEvent(std::atomic<intptr_t>* addr,
   return e;
 }
 
+// Deallocate the SynchEvent *e, whose refcount has fallen to zero.
+static void DeleteSynchEvent(SynchEvent* e) {
+  base_internal::LowLevelAlloc::Free(e);
+}
+
 // Decrement the reference count of *e, or do nothing if e==null.
 static void UnrefSynchEvent(SynchEvent* e) {
   if (e != nullptr) {
     synch_event_mu.Lock();
     bool del = (--(e->refcount) == 0);
     synch_event_mu.Unlock();
     if (del) {
-      base_internal::LowLevelAlloc::Free(e);
+      DeleteSynchEvent(e);
     }
   }
 }
 
+// Forget the mapping from the object (Mutex or CondVar) at address addr
+// to SynchEvent object, and clear "bits" in its word (waiting until lockbit
+// is clear before doing so).
+static void ForgetSynchEvent(std::atomic<intptr_t>* addr, intptr_t bits,
+                             intptr_t lockbit) {
+  uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
+  SynchEvent** pe;
+  SynchEvent* e;
+  synch_event_mu.Lock();
+  for (pe = &synch_event[h];
+       (e = *pe) != nullptr && e->masked_addr != base_internal::HidePtr(addr);
+       pe = &e->next) {
+  }
+  bool del = false;
+  if (e != nullptr) {
+    *pe = e->next;
+    del = (--(e->refcount) == 0);
+  }
+  AtomicClearBits(addr, bits, lockbit);
+  synch_event_mu.Unlock();
+  if (del) {
+    DeleteSynchEvent(e);
+  }
+}
+
 // Return a refcounted reference to the SynchEvent of the object at address
 // "addr", if any. The pointer returned is valid until UnrefSynchEvent() is
 // called.
@@ -730,35 +732,25 @@ static unsigned TsanFlags(Mutex::MuHow how) {
 }
 #endif
 
-#if defined(__APPLE__) || defined(ABSL_BUILD_DLL)
-// When building a dll, symbol export lists may reference the destructor
-// and want it to be an exported symbol rather than an inline function.
-// Some Apple builds also build dynamic libraries but don't say so explicitly.
-Mutex::~Mutex() { Dtor(); }
-#endif
+static bool DebugOnlyIsExiting() {
+  return false;
+}
 
-#if !defined(NDEBUG) || defined(ABSL_HAVE_THREAD_SANITIZER)
-void Mutex::Dtor() {
+Mutex::~Mutex() {
+  intptr_t v = mu_.load(std::memory_order_relaxed);
+  if ((v & kMuEvent) != 0 && !DebugOnlyIsExiting()) {
+    ForgetSynchEvent(&this->mu_, kMuEvent, kMuSpin);
+  }
   if (kDebugMode) {
     this->ForgetDeadlockInfo();
   }
   ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static);
 }
-#endif
 
 void Mutex::EnableDebugLog(const char* name) {
   SynchEvent* e = EnsureSynchEvent(&this->mu_, name, kMuEvent, kMuSpin);
   e->log = true;
   UnrefSynchEvent(e);
-  // This prevents "error: undefined symbol: absl::Mutex::~Mutex()"
-  // in a release build (NDEBUG defined) when a test does "#undef NDEBUG"
-  // to use the assert macro. In that case the test does not get the dtor
-  // definition, because it is supposed to be out-of-line when NDEBUG is not
-  // defined, and this source file does not define one either, because NDEBUG
-  // is defined. Since it is not possible to take the address of a destructor,
-  // we move the actual destructor code into the separate Dtor function and
-  // force the compiler to emit this function, even if it is inline, by
-  // taking its address.
-  ABSL_ATTRIBUTE_UNUSED volatile auto dtor = &Mutex::Dtor;
 }
 
 void EnableMutexInvariantDebugging(bool enabled) {
@@ -2478,6 +2470,12 @@ void CondVar::EnableDebugLog(const char* name) {
   UnrefSynchEvent(e);
 }
 
+CondVar::~CondVar() {
+  if ((cv_.load(std::memory_order_relaxed) & kCvEvent) != 0) {
+    ForgetSynchEvent(&this->cv_, kCvEvent, kCvSpin);
+  }
+}
+
 // Remove thread s from the list of waiters on this condition variable.
 void CondVar::Remove(PerThreadSynch* s) {
   SchedulingGuard::ScopedDisable disable_rescheduling;
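
Note: the restored ForgetSynchEvent unlinks the event from its hash bucket and decrements the refcount while holding synch_event_mu, but frees the memory only after unlocking. A reduced, single-bucket sketch of that unlink-and-unref pattern; the types and names here are hypothetical (the real code uses a raw spinlock, a kNSynchEvent-sized table, and LowLevelAlloc):

    #include <mutex>

    struct Event {              // stand-in for SynchEvent
      const void* addr;
      int refcount;
      Event* next;
    };

    std::mutex table_mu;        // stand-in for synch_event_mu
    Event* bucket = nullptr;    // one bucket instead of a hash table

    void Forget(const void* addr) {
      Event* dead = nullptr;
      {
        std::lock_guard<std::mutex> lock(table_mu);
        for (Event** pe = &bucket; *pe != nullptr; pe = &(*pe)->next) {
          if ((*pe)->addr == addr) {
            Event* e = *pe;
            *pe = e->next;                     // unlink from the bucket list
            if (--e->refcount == 0) dead = e;  // decide now, free later
            break;
          }
        }
      }
      delete dead;  // outside the lock; no-op when dead == nullptr
    }

    int main() {
      int object;  // any address works as a key for this sketch
      bucket = new Event{&object, 1, nullptr};
      Forget(&object);
      return 0;
    }

Deferring the free past the unlock keeps the critical section short, matching the Unlock-then-DeleteSynchEvent order in the diff above.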
11 changes: 1 addition & 10 deletions absl/synchronization/mutex.h
@@ -536,7 +536,6 @@ class ABSL_LOCKABLE Mutex {
   void Block(base_internal::PerThreadSynch* s);
   // Wake a thread; return successor.
   base_internal::PerThreadSynch* Wakeup(base_internal::PerThreadSynch* w);
-  void Dtor();
 
   friend class CondVar;  // for access to Trans()/Fer().
   void Trans(MuHow how);  // used for CondVar->Mutex transfer
@@ -910,6 +909,7 @@ class CondVar {
   // A `CondVar` allocated on the heap or on the stack can use this
   // constructor.
   CondVar();
+  ~CondVar();
 
   // CondVar::Wait()
   //
@@ -1061,15 +1061,6 @@ inline Mutex::Mutex() : mu_(0) {

 inline constexpr Mutex::Mutex(absl::ConstInitType) : mu_(0) {}
 
-#if !defined(__APPLE__) && !defined(ABSL_BUILD_DLL)
-inline Mutex::~Mutex() { Dtor(); }
-#endif
-
-#if defined(NDEBUG) && !defined(ABSL_HAVE_THREAD_SANITIZER)
-// Use default (empty) destructor in release build for performance reasons.
-inline void Mutex::Dtor() {}
-#endif
-
 inline CondVar::CondVar() : cv_(0) {}
 
 // static
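
Note: the deleted #if blocks above were the heart of the rolled-back change: ~Mutex() was defined inline, and Mutex::Dtor() was an empty inline in release builds. The comment removed from mutex.cc describes the failure mode when translation units disagree about NDEBUG. A contrived reduction of that hazard, with made-up file and class names; it is meant to fail at link time, not to run:

    // ---- lib.h (hypothetical) ----
    struct Lib {
      ~Lib();              // callers expect an out-of-line definition...
    };
    #ifdef NDEBUG
    inline Lib::~Lib() {}  // ...unless NDEBUG makes it inline and empty
    #endif

    // ---- lib.cc, built with -DNDEBUG ----
    // #include "lib.h"
    // Emits no out-of-line ~Lib(): the inline empty version is used instead.

    // ---- test.cc, which does "#undef NDEBUG" before including lib.h ----
    // #include "lib.h"
    // int main() { Lib l; }   // link error: undefined symbol Lib::~Lib()

The rollback sidesteps this by giving Mutex an unconditional out-of-line destructor again.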
27 changes: 0 additions & 27 deletions absl/synchronization/mutex_test.cc
@@ -1709,33 +1709,6 @@ TEST(Mutex, Logging) {
   logged_cv.SignalAll();
 }
 
-TEST(Mutex, LoggingAddressReuse) {
-  // Repeatedly re-create a Mutex with debug logging at the same address.
-  alignas(absl::Mutex) char storage[sizeof(absl::Mutex)];
-  auto invariant =
-      +[](void *alive) { EXPECT_TRUE(*static_cast<bool *>(alive)); };
-  constexpr size_t kIters = 10;
-  bool alive[kIters] = {};
-  for (size_t i = 0; i < kIters; ++i) {
-    absl::Mutex *mu = new (storage) absl::Mutex;
-    alive[i] = true;
-    mu->EnableDebugLog("Mutex");
-    mu->EnableInvariantDebugging(invariant, &alive[i]);
-    mu->Lock();
-    mu->Unlock();
-    mu->~Mutex();
-    alive[i] = false;
-  }
-}
-
-TEST(Mutex, LoggingBankrupcy) {
-  // Test the case with too many live Mutexes with debug logging.
-  std::vector<absl::Mutex> mus(1 << 20);
-  for (auto &mu : mus) {
-    mu.EnableDebugLog("Mutex");
-  }
-}
-
 // --------------------------------------------------------
 
 // Generate the vector of thread counts for tests parameterized on thread count.
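
Note: the deleted LoggingAddressReuse test above drove a Mutex through repeated construction and destruction at one fixed address. Stripped of the Mutex specifics, that lifetime pattern is placement new plus an explicit destructor call; a minimal sketch with a made-up type:

    #include <new>

    struct Probe {  // hypothetical stand-in for absl::Mutex
      explicit Probe(int id) : id(id) {}
      int id;
    };

    int main() {
      alignas(Probe) unsigned char storage[sizeof(Probe)];
      for (int i = 0; i < 10; ++i) {
        Probe* p = new (storage) Probe(i);  // construct at the same address
        p->~Probe();                        // destroy before the next round
      }
      return 0;
    }

Both deleted tests exercised behavior specific to the rolled-back change (leaked SynchEvents and the 100K-event bankruptcy cap), so they are removed along with it.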