See the added overview comment in eh_cpp.cc. The ABI spec is still lacking updated documentation for this, which I can add later. We don't need to change __cxa_tm_cleanup in libsupc++, but we no longer use its first two arguments, which we may want to document too.
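To make that concrete, the cleanup calls in revert_cpp_exceptions change roughly like this (simplified from the patch below, not a separate change):

    // Before: unthrown and in-flight exceptions were passed explicitly.
    __cxa_tm_cleanup (cxa_unthrown, eh_in_flight, cxa_catch_count);
    // After: only the number of unfinished catch handlers is passed;
    // unthrown exceptions are rolled back through the allocation undo
    // log, and in-flight ones through the uncaught-exceptions count.
    __cxa_tm_cleanup (NULL, NULL, cxa_catch_count);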
Thoughts?
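For reference, here is how the three phases described in the new comment map onto the EH ABI calls. This is just an illustration of mine, not part of the patch; it assumes -fgnu-tm and a compiler that instruments the EH calls inside the transaction:

    void example ()
    {
      __transaction_atomic {
        try {
          // phase (1): _ITM_cxa_allocate_exception allocates the exception
          // object, which is then constructed but not yet thrown.
          // phase (2): _ITM_cxa_throw puts it in flight and increments the
          // uncaughtExceptions count in the __cxa_get_globals() structure.
          throw 23;
        } catch (int) {
          // phase (3): _ITM_cxa_begin_catch .. _ITM_cxa_end_catch bracket
          // the handler; cxa_catch_count tracks unfinished handlers.
        }
      }
    }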
commit 0a67dc5a13fd17a24fc667a251d000a73cd5159e
Author: Torvald Riegel <trie...@redhat.com>
Date:   Tue Nov 3 15:38:22 2015 +0100

    Support __cxa_free_exception and fix exception handling.

diff --git a/libitm/beginend.cc b/libitm/beginend.cc
index c3ed11b..86f7b39 100644
--- a/libitm/beginend.cc
+++ b/libitm/beginend.cc
@@ -132,6 +132,8 @@ GTM::gtm_thread::gtm_thread ()
   number_of_threads_changed(number_of_threads - 1, number_of_threads);
   serial_lock.write_unlock ();
 
+  init_cpp_exceptions ();
+
   if (pthread_once(&thr_release_once, thread_exit_init))
     GTM_fatal("Initializing thread release TLS key failed.");
   // Any non-null value is sufficient to trigger destruction of this
@@ -383,6 +385,11 @@ GTM::gtm_thread::begin_transaction (uint32_t prop, const gtm_jmpbuf *jb)
 #endif
     }
 
+  // Log the number of uncaught exceptions if we might have to roll back this
+  // state.
+  if (tx->cxa_uncaught_count_ptr != 0)
+    tx->cxa_uncaught_count = *tx->cxa_uncaught_count_ptr;
+
   // Run dispatch-specific restart code.  Retry until we succeed.
   GTM::gtm_restart_reason rr;
   while ((rr = disp->begin_or_restart()) != NO_RESTART)
@@ -411,7 +418,7 @@ GTM::gtm_transaction_cp::save(gtm_thread* tx)
   id = tx->id;
   prop = tx->prop;
   cxa_catch_count = tx->cxa_catch_count;
-  cxa_unthrown = tx->cxa_unthrown;
+  cxa_uncaught_count = tx->cxa_uncaught_count;
   disp = abi_disp();
   nesting = tx->nesting;
 }
@@ -583,7 +590,6 @@ GTM::gtm_thread::trycommit ()
   undolog.commit ();
   // Reset further transaction state.
   cxa_catch_count = 0;
-  cxa_unthrown = NULL;
   restart_total = 0;
 
   // Ensure privatization safety, if necessary.
diff --git a/libitm/eh_cpp.cc b/libitm/eh_cpp.cc
index a86dbf1..e28c057 100644
--- a/libitm/eh_cpp.cc
+++ b/libitm/eh_cpp.cc
@@ -26,6 +26,50 @@
 
 using namespace GTM;
 
+/* Exceptions can exist in three phases: (1) after having been allocated by
+   __cxa_allocate_exception but before being handed off to __cxa_throw,
+   (2) when they are in flight, so between __cxa_throw and __cxa_begin_catch,
+   and (3) when they are being handled (between __cxa_begin_catch and
+   __cxa_end_catch).
+
+   We can get aborts in all three phases, for example in (1) during
+   construction of the exception object, or in (2) during destructors called
+   while unwinding the stack.  The transaction that created an exception
+   object can commit in phase (2) but not in phases (1) and (3) because both
+   throw expressions and catch clauses are properly nested wrt transactions.
+
+   We handle phase (1) by dealing with exception objects similar to how we
+   deal with other (de)allocations, which also ensures that we can have more
+   than one exception object allocated at the same time (e.g., if the
+   throw expression itself throws an exception and thus calls
+   __cxa_allocate_exception).  However, on the call to __cxa_begin_catch
+   we hand off the exception to the special handling of phase (3) and
+   remove the undo log entry of the allocation.  Note that if the allocation
+   happened outside of this transaction, we do not need to do anything.
+
+   When an exception reaches phase (2) due to a call to __cxa_throw, the count
+   of uncaught exceptions is incremented.  We roll back this effect by saving
+   and restoring this number in the structure returned from __cxa_get_globals.
+   This also takes care of increments of this count when rethrowing an
+   exception.
+
+   For phase (3), we keep track of the number of times __cxa_begin_catch
+   has been called without a matching call to __cxa_end_catch.  This count
+   is then used by __cxa_tm_cleanup to roll back the exception handling state
+   by calling __cxa_end_catch for the exceptions that have not been finished
+   yet (without running destructors though because we roll back the memory
+   anyway).
+   Once an exception that was allocated in this transaction enters phase (3),
+   it does not need to be deallocated on abort anymore because the calls to
+   __cxa_end_catch will take care of that.
+
+   We require all code executed by the transaction to be transaction_safe (or
+   transaction_pure, or to have wrappers) if the transaction is to be rolled
+   back.  However, we take care to not require this for transactions that
+   just commit; this way, transactions that enter serial mode and then call
+   uninstrumented code continue to work.
+ */
+
 /* Everything from libstdc++ is weak, to avoid requiring that library to
    be linked into plain C applications using libitm.so.  */
 
@@ -33,85 +77,138 @@ using namespace GTM;
 
 extern "C" {
 
+struct __cxa_eh_globals
+{
+  void * caughtExceptions;
+  unsigned int uncaughtExceptions;
+};
+
 extern void *__cxa_allocate_exception (size_t) WEAK;
+extern void __cxa_free_exception (void *) WEAK;
 extern void __cxa_throw (void *, void *, void *) WEAK;
 extern void *__cxa_begin_catch (void *) WEAK;
 extern void __cxa_end_catch (void) WEAK;
 extern void __cxa_tm_cleanup (void *, void *, unsigned int) WEAK;
+extern __cxa_eh_globals *__cxa_get_globals (void) WEAK;
 
 #if !defined (HAVE_ELF_STYLE_WEAKREF)
 
 void *__cxa_allocate_exception (size_t) { return NULL; }
+void __cxa_free_exception (void *) { return; }
 void __cxa_throw (void *, void *, void *) { return; }
 void *__cxa_begin_catch (void *) { return NULL; }
 void __cxa_end_catch (void) { return; }
 void __cxa_tm_cleanup (void *, void *, unsigned int) { return; }
 void _Unwind_DeleteException (_Unwind_Exception *) { return; }
+__cxa_eh_globals *__cxa_get_globals (void) { return NULL; }
 
 #endif /* HAVE_ELF_STYLE_WEAKREF */
 
 }
 
+static void
+free_any_exception (void *exc_ptr)
+{
+  // The exception could be in phase (2) and thus calling just
+  // _cxa_free_exception might not be sufficient.
+  __cxa_tm_cleanup (NULL, exc_ptr, 0);
+}
 
 void *
 _ITM_cxa_allocate_exception (size_t size)
 {
   void *r = __cxa_allocate_exception (size);
-  gtm_thr()->cxa_unthrown = r;
+  gtm_thr()->record_allocation (r, free_any_exception);
   return r;
 }
 
 void
+_ITM_cxa_free_exception (void *exc_ptr)
+{
+  // __cxa_free_exception can be called from user code directly if
+  // construction of an exception object throws another exception, in which
+  // case we need to roll back the initial exception.  We handle this similar
+  // to dead allocations in that we deallocate the exception on both commit
+  // and abort of an outermost transaction.
+  gtm_thr()->forget_allocation (exc_ptr, free_any_exception);
+}
+
+void
 _ITM_cxa_throw (void *obj, void *tinfo, void *dest)
 {
-  gtm_thr()->cxa_unthrown = NULL;
   __cxa_throw (obj, tinfo, dest);
 }
 
 void *
 _ITM_cxa_begin_catch (void *exc_ptr)
 {
-  gtm_thr()->cxa_catch_count++;
+  // If this exception object has been allocated by this transaction, we
+  // discard the undo log entry for the allocation; we are entering phase (3)
+  // now and will handle this exception specially.
+  // Note that this exception cannot have been allocated in a parent
+  // transaction or enclosing nontransactional block because an atomic block
+  // cannot contain just a catch clause but not the associated try clause.
+  // The exception can have been allocated in a nested transaction, in which
+  // case the commit of the nested transaction will have inserted the undo
+  // log entry of the allocation in our undo log.
+  // The exception can also have been allocated in a nested nontransactional
+  // block, but then this transaction cannot abort anymore; functions that
+  // are marked transaction_pure, for example, must not side-step the
+  // transactional exception handling we implement here.
+  gtm_thread *t = gtm_thr ();
+  t->discard_allocation (exc_ptr);
+  // Keep track of the number of unfinished catch handlers.
+  t->cxa_catch_count++;
   return __cxa_begin_catch (exc_ptr);
 }
 
 void
 _ITM_cxa_end_catch (void)
 {
+  // Keep track of the number of unfinished catch handlers.
   gtm_thr()->cxa_catch_count--;
   __cxa_end_catch ();
 }
 
 void
+GTM::gtm_thread::init_cpp_exceptions ()
+{
+  // Only save and restore the number of uncaught exceptions if this is
+  // actually used in the program.
+  if (__cxa_get_globals != NULL && __cxa_get_globals () != 0)
+    cxa_uncaught_count_ptr = &__cxa_get_globals ()->uncaughtExceptions;
+  else
+    cxa_uncaught_count_ptr = 0;
+}
+
+void
 GTM::gtm_thread::revert_cpp_exceptions (gtm_transaction_cp *cp)
 {
   if (cp)
     {
-      // If rolling back a nested transaction, only clean up unthrown
-      // exceptions since the last checkpoint. Always reset eh_in_flight
-      // because it just contains the argument provided to
-      // _ITM_commitTransactionEH
-      void *unthrown =
-        (cxa_unthrown != cp->cxa_unthrown ? cxa_unthrown : NULL);
+      // If rolling back a nested transaction, only clean up incompletely
+      // caught exceptions since the last checkpoint.
       assert (cxa_catch_count >= cp->cxa_catch_count);
       uint32_t catch_count = cxa_catch_count - cp->cxa_catch_count;
-      if (unthrown || catch_count)
+      if (catch_count)
        {
-         __cxa_tm_cleanup (unthrown, this->eh_in_flight, catch_count);
+         __cxa_tm_cleanup (NULL, NULL, catch_count);
          cxa_catch_count = cp->cxa_catch_count;
-         cxa_unthrown = cp->cxa_unthrown;
-         this->eh_in_flight = NULL;
        }
     }
   else
    {
      // Both cxa_catch_count and cxa_unthrown are maximal because EH regions
      // and transactions are properly nested.
-     if (this->cxa_unthrown || this->cxa_catch_count)
+     if (cxa_catch_count)
        {
-         __cxa_tm_cleanup (this->cxa_unthrown, this->eh_in_flight,
-                           this->cxa_catch_count);
-         this->cxa_catch_count = 0;
-         this->cxa_unthrown = NULL;
-         this->eh_in_flight = NULL;
+         __cxa_tm_cleanup (NULL, NULL, cxa_catch_count);
+         cxa_catch_count = 0;
        }
    }
+  // Reset the number of uncaught exceptions.  Any allocations for these
+  // exceptions have been rolled back already, if necessary.
+  if (cxa_uncaught_count_ptr != 0)
+    *cxa_uncaught_count_ptr = cxa_uncaught_count;
+  // Always reset eh_in_flight because it just contains the argument provided
+  // to _ITM_commitTransactionEH.
+  eh_in_flight = NULL;
 }
diff --git a/libitm/libitm.h b/libitm/libitm.h
index 651896b..900c444 100644
--- a/libitm/libitm.h
+++ b/libitm/libitm.h
@@ -283,6 +283,7 @@ extern void _ITM_registerTMCloneTable (void *, size_t);
 extern void _ITM_deregisterTMCloneTable (void *);
 
 extern void *_ITM_cxa_allocate_exception (size_t);
+extern void _ITM_cxa_free_exception (void *exc_ptr);
 extern void _ITM_cxa_throw (void *obj, void *tinfo, void *dest);
 extern void *_ITM_cxa_begin_catch (void *exc_ptr);
 extern void _ITM_cxa_end_catch (void);
diff --git a/libitm/libitm.map b/libitm/libitm.map
index ac371de..b2e1c2d 100644
--- a/libitm/libitm.map
+++ b/libitm/libitm.map
@@ -186,4 +186,5 @@ LIBITM_1.1 {
   global:
     _ZGTtdlPv?;
     _ZGTtdlPv?RKSt9nothrow_t;
+    _ITM_cxa_free_exception;
 } LIBITM_1.0;
diff --git a/libitm/libitm_i.h b/libitm/libitm_i.h
index bf8d4d1..f01a1ab 100644
--- a/libitm/libitm_i.h
+++ b/libitm/libitm_i.h
@@ -132,7 +132,7 @@ struct gtm_transaction_cp
   _ITM_transactionId_t id;
   uint32_t prop;
   uint32_t cxa_catch_count;
-  void *cxa_unthrown;
+  unsigned int cxa_uncaught_count;
   // We might want to use a different but compatible dispatch method for
   // a nested transaction.
   abi_dispatch *disp;
@@ -242,7 +242,9 @@ struct gtm_thread
 
   // Data used by eh_cpp.c for managing exceptions within the transaction.
   uint32_t cxa_catch_count;
-  void *cxa_unthrown;
+  // If cxa_uncaught_count_ptr is 0, we don't need to roll back exceptions.
+  unsigned int *cxa_uncaught_count_ptr;
+  unsigned int cxa_uncaught_count;
   void *eh_in_flight;
 
   // Checkpoints for closed nesting.
@@ -284,9 +286,9 @@ struct gtm_thread
   void record_allocation (void *, void (*)(void *));
   void forget_allocation (void *, void (*)(void *));
   void forget_allocation (void *, size_t, void (*)(void *, size_t));
-  void drop_references_allocations (const void *ptr)
+  void discard_allocation (const void *ptr)
   {
-    this->alloc_actions.erase((uintptr_t) ptr);
+    alloc_actions.erase((uintptr_t) ptr);
   }
 
   // In beginend.cc
@@ -306,6 +308,7 @@ struct gtm_thread
   static uint32_t begin_transaction(uint32_t, const gtm_jmpbuf *)
        __asm__(UPFX "GTM_begin_transaction") ITM_REGPARM;
   // In eh_cpp.cc
+  void init_cpp_exceptions ();
   void revert_cpp_exceptions (gtm_transaction_cp *cp = 0);
 
   // In retry.cc
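As an illustration of when _ITM_cxa_free_exception comes into play (again my example, not from the patch): if the constructor of the thrown object itself throws, the already-allocated exception object has to be freed, and inside a transaction that now goes through _ITM_cxa_free_exception so that the allocation can be discarded on both commit and abort:

    struct raises
    {
      raises () { throw 23; }
    };

    void example2 ()
    {
      __transaction_atomic {
        try {
          // _ITM_cxa_allocate_exception allocates space for the raises
          // object; its constructor throws, so the cleanup emitted for the
          // throw expression calls _ITM_cxa_free_exception on that space
          // before the int exception propagates to the handler.
          throw raises ();
        } catch (int) {
        }
      }
    }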