===================================================================
@@ -0,0 +1,98 @@
+## Process this file with automake to produce Makefile.in
+
+ACLOCAL_AMFLAGS = -I .. -I ../config
+SUBDIRS = testsuite
+
+## May be used by toolexeclibdir.
+gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
+
+abi_version = -fabi-version=4
+
+config_path = @config_path@
+search_path = $(addprefix $(top_srcdir)/config/, $(config_path)) $(top_srcdir)
+
+fincludedir = $(libdir)/gcc/$(target_alias)/$(gcc_version)/finclude
+libsubincludedir = $(libdir)/gcc/$(target_alias)/$(gcc_version)/include
+
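+## Search the target-specific config directories (and $(top_srcdir)) for
+## any source file that is not found in the build directory.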
+vpath % $(strip $(search_path))
+
+AM_CPPFLAGS = $(addprefix -I, $(search_path))
+AM_CFLAGS = $(XCFLAGS)
+AM_CXXFLAGS = -std=gnu++0x -funwind-tables -fno-exceptions -fno-rtti \
+ $(XCFLAGS) $(abi_version)
+AM_CCASFLAGS = $(XCFLAGS)
+AM_LDFLAGS = $(XLDFLAGS) $(SECTION_LDFLAGS) $(OPT_LDFLAGS)
+
+toolexeclib_LTLIBRARIES = libitm.la
+nodist_toolexeclib_HEADERS = libitm.spec
+
+if LIBITM_BUILD_VERSIONED_SHLIB
+libitm_version_script = -Wl,--version-script,$(top_srcdir)/libitm.map
+else
+libitm_version_script =
+endif
+libitm_version_info = -version-info $(libtool_VERSION)
+
+# Force link with C, not C++.  Even though the library is written in C++,
+# for now we don't want or need libstdc++.
+libitm_la_LINK = $(LINK)
+libitm_la_LDFLAGS = $(libitm_version_info) $(libitm_version_script) \
+ -no-undefined
+
+libitm_la_SOURCES = \
+ aatree.cc alloc.cc alloc_c.cc alloc_cpp.cc barrier.cc beginend.cc \
+ clone.cc cacheline.cc cachepage.cc eh_cpp.cc local.cc \
+ query.cc retry.cc rwlock.cc useraction.cc util.cc \
+ sjlj.S tls.cc method-serial.cc method-gl.cc
+
+if ARCH_X86
+libitm_la_SOURCES += x86_sse.cc x86_avx.cc
+x86_sse.lo : XCFLAGS += -msse
+x86_avx.lo : XCFLAGS += -mavx
+endif
+
+if ARCH_FUTEX
+libitm_la_SOURCES += futex.cc
+endif
+
+# Automake Documentation:
+# If your package has Texinfo files in many directories, you can use the
+# variable TEXINFO_TEX to tell Automake where to find the canonical
+# `texinfo.tex' for your package. The value of this variable should be
+# the relative path from the current `Makefile.am' to `texinfo.tex'.
+TEXINFO_TEX = ../gcc/doc/include/texinfo.tex
+
+# Defines info, dvi, pdf and html targets
+MAKEINFOFLAGS = -I $(srcdir)/../gcc/doc/include
+info_TEXINFOS = libitm.texi
+
+# AM_CONDITIONAL on configure option --generated-files-in-srcdir
+if GENINSRC
+STAMP_GENINSRC = stamp-geninsrc
+else
+STAMP_GENINSRC =
+endif
+
+# AM_CONDITIONAL on configure check ACX_CHECK_PROG_VER([MAKEINFO])
+if BUILD_INFO
+STAMP_BUILD_INFO = stamp-build-info
+else
+STAMP_BUILD_INFO =
+endif
+
+
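+# libitm.info is rebuilt from libitm.texi only when makeinfo is available
+# (stamp-build-info); with --generated-files-in-srcdir the generated file
+# is also copied back into the source tree (stamp-geninsrc).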
+all-local: $(STAMP_GENINSRC)
+
+stamp-geninsrc: libitm.info
+ cp -p $(top_builddir)/libitm.info $(srcdir)/libitm.info
+ @touch $@
+
+libitm.info: $(STAMP_BUILD_INFO)
+
+stamp-build-info: libitm.texi
+ $(MAKEINFO) $(AM_MAKEINFOFLAGS) $(MAKEINFOFLAGS) -I $(srcdir) -o libitm.info $(srcdir)/libitm.texi
+ @touch $@
+
+
+CLEANFILES = $(STAMP_GENINSRC) $(STAMP_BUILD_INFO) libitm.info
+MAINTAINERCLEANFILES = $(srcdir)/libitm.info
===================================================================
@@ -0,0 +1,365 @@
+/* Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+ Contributed by Richard Henderson <rth@redhat.com>.
+
+ This file is part of the GNU Transactional Memory Library (libitm).
+
+ Libitm is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include "libitm_i.h"
+
+using namespace GTM;
+
+static void
+do_memcpy (uintptr_t idst, uintptr_t isrc, size_t size,
+ abi_dispatch::lock_type W, abi_dispatch::lock_type R)
+{
+ abi_dispatch *disp = abi_disp();
+ // The position in the destination cacheline where *IDST starts.
+ uintptr_t dofs = idst & (CACHELINE_SIZE - 1);
+ // The position in the source cacheline where *ISRC starts.
+ uintptr_t sofs = isrc & (CACHELINE_SIZE - 1);
+ const gtm_cacheline *src
+ = reinterpret_cast<const gtm_cacheline *>(isrc & -CACHELINE_SIZE);
+ gtm_cacheline *dst
+ = reinterpret_cast<gtm_cacheline *>(idst & -CACHELINE_SIZE);
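+ // SLINE is the read-locked source cacheline.  DPAIR bundles the
+ // destination cacheline to write (line) with a byte mask (mask) that
+ // records which bytes of that line the copy modifies.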
+ const gtm_cacheline *sline;
+ abi_dispatch::mask_pair dpair;
+
+ if (size == 0)
+ return;
+
+ // If both SRC and DST data start at the same position in the cachelines,
+ // we can easily copy the data in tandem, cacheline by cacheline...
+ if (dofs == sofs)
+ {
+ // We copy the data in three stages:
+
+ // (a) Copy stray bytes at the beginning that are smaller than a
+ // cacheline.
+ if (sofs != 0)
+ {
+ size_t sleft = CACHELINE_SIZE - sofs;
+ size_t min = (size <= sleft ? size : sleft);
+
+ dpair = disp->write_lock(dst, W);
+ sline = disp->read_lock(src, R);
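+ // Mark bytes [sofs, sofs + min) of the destination line as written.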
+ *dpair.mask |= (((gtm_cacheline_mask)1 << min) - 1) << sofs;
+ memcpy (&dpair.line->b[sofs], &sline->b[sofs], min);
+ dst++;
+ src++;
+ size -= min;
+ }
+
+ // (b) Copy subsequent cacheline sized chunks.
+ while (size >= CACHELINE_SIZE)
+ {
+ dpair = disp->write_lock(dst, W);
+ sline = disp->read_lock(src, R);
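+ // The whole cacheline is overwritten, so every mask bit is set.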
+ *dpair.mask = -1;
+ *dpair.line = *sline;
+ dst++;
+ src++;
+ size -= CACHELINE_SIZE;
+ }
+
+ // (c) Copy anything left over.
+ if (size != 0)
+ {
+ dpair = disp->write_lock(dst, W);
+ sline = disp->read_lock(src, R);
+ *dpair.mask |= ((gtm_cacheline_mask)1 << size) - 1;
+ memcpy (dpair.line, sline, size);
+ }
+ }
+ // ... otherwise, we must copy the data in disparate hunks using
+ // temporary storage.
+ else
+ {
+ gtm_cacheline c;
+ size_t sleft = CACHELINE_SIZE - sofs;
+
+ sline = disp->read_lock(src, R);
+
+ // As above, we copy the data in three stages:
+
+ // (a) Copy stray bytes at the beginning that are smaller than a
+ // cacheline.
+ if (dofs != 0)
+ {
+ size_t dleft = CACHELINE_SIZE - dofs;
+ size_t min = (size <= dleft ? size : dleft);
+
+ dpair = disp->write_lock(dst, W);
+ *dpair.mask |= (((gtm_cacheline_mask)1 << min) - 1) << dofs;
+
+ // If what's left in the source cacheline will fit in the
+ // rest of the destination cacheline, straight up copy it.
+ if (min <= sleft)
+ {
+ memcpy (&dpair.line->b[dofs], &sline->b[sofs], min);
+ sofs += min;
+ }
+ // Otherwise, we need more bits from the source cacheline
+ // than are available.  Piece together what we need from
+ // contiguous (source) cachelines, into temp space, and copy
+ // it over.
+ else
+ {
+ memcpy (&c, &sline->b[sofs], sleft);
+ sline = disp->read_lock(++src, R);
+ sofs = min - sleft;
+ memcpy (&c.b[sleft], sline, sofs);
+ memcpy (&dpair.line->b[dofs], &c, min);
+ }
+ sleft = CACHELINE_SIZE - sofs;
+
+ dst++;
+ size -= min;
+ }
+
+ // (b) Copy subsequent cacheline sized chunks.
+ while (size >= CACHELINE_SIZE)
+ {
+ // We have a full (destination) cacheline where to put the
+ // data, but to get to the corresponding cacheline sized
+ // chunk in the source, we have to piece together two
+ // contiguous source cachelines.
+
+ memcpy (&c, &sline->b[sofs], sleft);
+ sline = disp->read_lock(++src, R);
+ memcpy (&c.b[sleft], sline, sofs);
+
+ dpair = disp->write_lock(dst, W);
+ *dpair.mask = -1;
+ *dpair.line = c;
+
+ dst++;
+ size -= CACHELINE_SIZE;
+ }
+
+ // (c) Copy anything left over.
+ if (size != 0)
+ {
+ dpair = disp->write_lock(dst, W);
+ *dpair.mask |= ((gtm_cacheline_mask)1 << size) - 1;
+ // If what's left to copy is entirely in the remaining
+ // source cacheline, do it.
+ if (size <= sleft)
+ memcpy (dpair.line, &sline->b[sofs], size);
+ // Otherwise, piece together the remaining bits, and copy.
+ else
+ {
+ memcpy (&c, &sline->b[sofs], sleft);
+ sline = disp->read_lock(++src, R);
+ memcpy (&c.b[sleft], sline, size - sleft);
+ memcpy (dpair.line, &c, size);
+ }
+ }
+ }
+}
+
+static void
+do_memmove (uintptr_t idst, uintptr_t isrc, size_t size,
+ abi_dispatch::lock_type W, abi_dispatch::lock_type R)
+{
+ abi_dispatch *disp = abi_disp();
+ uintptr_t dleft, sleft, sofs, dofs;
+ const gtm_cacheline *sline;
+ abi_dispatch::mask_pair dpair;
+
+ if (size == 0)
+ return;
+
+ /* The co-aligned memmove below doesn't work for DST == SRC, so filter
+ that out. It's tempting to just return here, as this is a no-op move.
+ However, our caller has the right to expect the locks to be acquired
+ as advertised. */
+ if (__builtin_expect (idst == isrc, 0))
+ {
+ /* If the write lock is already acquired, nothing to do. */
+ if (W == abi_dispatch::WaW)
+ return;
+ /* If the destination is protected, acquire a write lock. */
+ if (W != abi_dispatch::NOLOCK)
+ R = abi_dispatch::RfW;
+ /* Notice serial mode, where we don't acquire locks at all. */
+ if (R == abi_dispatch::NOLOCK)
+ return;
+
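+ /* Acquire the chosen lock on every cacheline covered by the block. */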
+ idst = isrc + size;
+ for (isrc &= -CACHELINE_SIZE; isrc < idst; isrc += CACHELINE_SIZE)
+ disp->read_lock(reinterpret_cast<const gtm_cacheline *>(isrc), R);
+ return;
+ }
+
+ /* Fall back to memcpy when a forward copy is safe, i.e. DST precedes
+ SRC or the two blocks do not overlap at all. */
+ if (idst < isrc || isrc + size <= idst)
+ {
+ do_memcpy (idst, isrc, size, W, R);
+ return;
+ }
+
+ /* What remains requires a backward copy from the end of the blocks. */
+ idst += size;
+ isrc += size;
+ dofs = idst & (CACHELINE_SIZE - 1);
+ sofs = isrc & (CACHELINE_SIZE - 1);
+ dleft = CACHELINE_SIZE - dofs;
+ sleft = CACHELINE_SIZE - sofs;
+
+ gtm_cacheline *dst
+ = reinterpret_cast<gtm_cacheline *>(idst & -CACHELINE_SIZE);
+ const gtm_cacheline *src
+ = reinterpret_cast<const gtm_cacheline *>(isrc & -CACHELINE_SIZE);
+ if (dofs == 0)
+ dst--;
+ if (sofs == 0)
+ src--;
+
+ if (dofs == sofs)
+ {
+ /* Since DST and SRC are co-aligned, and we didn't use the memcpy
+ optimization above, that implies that SIZE > CACHELINE_SIZE. */
+ if (sofs != 0)
+ {
+ dpair = disp->write_lock(dst, W);
+ sline = disp->read_lock(src, R);
+ *dpair.mask |= ((gtm_cacheline_mask)1 << sofs) - 1;
+ memcpy (dpair.line, sline, sofs);
+ dst--;
+ src--;
+ size -= sofs;
+ }
+
+ while (size >= CACHELINE_SIZE)
+ {
+ dpair = disp->write_lock(dst, W);
+ sline = disp->read_lock(src, R);
+ *dpair.mask = -1;
+ *dpair.line = *sline;
+ dst--;
+ src--;
+ size -= CACHELINE_SIZE;
+ }
+
+ if (size != 0)
+ {
+ size_t ofs = CACHELINE_SIZE - size;
+ dpair = disp->write_lock(dst, W);
+ sline = disp->read_lock(src, R);
+ *dpair.mask |= (((gtm_cacheline_mask)1 << size) - 1) << ofs;
+ memcpy (&dpair.line->b[ofs], &sline->b[ofs], size);
+ }
+ }
+ else
+ {
+ gtm_cacheline c;
+
+ sline = disp->read_lock(src, R);
+ if (dofs != 0)
+ {
+ size_t min = (size <= dofs ? size : dofs);
+
+ if (min <= sofs)
+ {
+ sofs -= min;
+ memcpy (&c, &sline->b[sofs], min);
+ }
+ else
+ {
+ size_t min_ofs = min - sofs;
+ memcpy (&c.b[min_ofs], sline, sofs);
+ sline = disp->read_lock(--src, R);
+ sofs = CACHELINE_SIZE - min_ofs;
+ memcpy (&c, &sline->b[sofs], min_ofs);
+ }
+
+ dofs -= min;
+ dpair = disp->write_lock(dst, W);
+ *dpair.mask |= (((gtm_cacheline_mask)1 << min) - 1) << dofs;
+ memcpy (&dpair.line->b[dofs], &c, min);
+
+ sleft = CACHELINE_SIZE - sofs;
+ dst--;
+ size -= min;
+ }
+
+ while (size >= CACHELINE_SIZE)
+ {
+ memcpy (&c.b[sleft], sline, sofs);
+ sline = disp->read_lock(--src, R);
+ memcpy (&c, &sline->b[sofs], sleft);
+
+ dpair = disp->write_lock(dst, W);
+ *dpair.mask = -1;
+ *dpair.line = c;
+
+ dst--;
+ size -= CACHELINE_SIZE;
+ }
+
+ if (size != 0)
+ {
+ dofs = CACHELINE_SIZE - size;
+
+ memcpy (&c.b[sleft], sline, sofs);
+ if (sleft > dofs)
+ {
+ sline = disp->read_lock(--src, R);
+ memcpy (&c, &sline->b[sofs], sleft);
+ }
+
+ dpair = disp->write_lock(dst, W);
+ *dpair.mask |= (gtm_cacheline_mask)-1 << dofs;
+ memcpy (&dpair.line->b[dofs], &c.b[dofs], size);
+ }
+ }
+}
+
+#define ITM_MEM_DEF(NAME, READ, WRITE) \
+void ITM_REGPARM _ITM_memcpy##NAME(void *dst, const void *src, size_t size) \
+{ \
+ do_memcpy ((uintptr_t)dst, (uintptr_t)src, size, \
+ abi_dispatch::WRITE, abi_dispatch::READ); \
+} \
+void ITM_REGPARM _ITM_memmove##NAME(void *dst, const void *src, size_t size) \
+{ \
+ do_memmove ((uintptr_t)dst, (uintptr_t)src, size, \
+ abi_dispatch::WRITE, abi_dispatch::READ); \
+}
+
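+// The suffix names how the source is read (R...) and how the destination
+// is written (W...): "n" is a non-transactional access (NOLOCK), "t" a
+// plain transactional access, and "taR"/"taW" a transactional access to
+// data this transaction has already read/written, matching the lock
+// types passed to ITM_MEM_DEF below.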
+ITM_MEM_DEF(RnWt, NOLOCK, W)
+ITM_MEM_DEF(RnWtaR, NOLOCK, WaR)
+ITM_MEM_DEF(RnWtaW, NOLOCK, WaW)
+
+ITM_MEM_DEF(RtWn, R, NOLOCK)
+ITM_MEM_DEF(RtWt, R, W)
+ITM_MEM_DEF(RtWtaR, R, WaR)
+ITM_MEM_DEF(RtWtaW, R, WaW)
+
+ITM_MEM_DEF(RtaRWn, RaR, NOLOCK)
+ITM_MEM_DEF(RtaRWt, RaR, W)
+ITM_MEM_DEF(RtaRWtaR, RaR, WaR)
+ITM_MEM_DEF(RtaRWtaW, RaR, WaW)
+
+ITM_MEM_DEF(RtaWWn, RaW, NOLOCK)
+ITM_MEM_DEF(RtaWWt, RaW, W)
+ITM_MEM_DEF(RtaWWtaR, RaW, WaR)
+ITM_MEM_DEF(RtaWWtaW, RaW, WaW)
===================================================================
@@ -0,0 +1,129 @@
+/* Copyright (C) 2008, 2009, 2011 Free Software Foundation, Inc.
+ Contributed by Richard Henderson <rth@redhat.com>.
+
+ This file is part of the GNU Transactional Memory Library (libitm).
+
+ Libitm is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include "libitm_i.h"
+
+namespace GTM HIDDEN {
+
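+/* One undo-log entry: the original contents of the LEN bytes at ADDR,
+ stored inline in SAVED so they can be written back on abort. */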
+struct gtm_undolog_entry
+{
+ void *addr;
+ size_t len;
+ char saved[];
+};
+
+
+void
+gtm_thread::commit_undolog ()
+{
+ size_t i, n = undolog.size();
+
+ if (n > 0)
+ {
+ for (i = 0; i < n; ++i)
+ free (undolog[i]);
+ this->undolog.clear();
+ }
+}
+
+void
+gtm_thread::rollback_undolog (size_t until_size)
+{
+ size_t i, n = undolog.size();
+
+ if (n > 0)
+ {
+ for (i = n; i-- > until_size; )
+ {
+ gtm_undolog_entry *u = *undolog.pop();
+ if (u)
+ {
+ memcpy (u->addr, u->saved, u->len);
+ free (u);
+ }
+ }
+ }
+}
+
+/* Forget any references to PTR in the local log. */
+
+void
+gtm_thread::drop_references_undolog (const void *ptr, size_t len)
+{
+ size_t i, n = undolog.size();
+
+ if (n > 0)
+ {
+ for (i = n; i-- > 0; )
+ {
+ gtm_undolog_entry *u = undolog[i];
+ /* ?? Do we need such granularity, or can we get away with
+ just comparing PTR and LEN? ?? */
+ if (u != NULL
+ && (const char *)u->addr >= (const char *)ptr
+ && ((const char *)u->addr + u->len <= (const char *)ptr + len))
+ {
+ free (u);
+ undolog[i] = NULL;
+ }
+ }
+ }
+}
+
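+/* Record the current contents of the LEN bytes at PTR in the undo log so
+ that rollback_undolog can restore them if the transaction aborts. */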
+void ITM_REGPARM
+GTM_LB (const void *ptr, size_t len)
+{
+ gtm_thread *tx = gtm_thr();
+ gtm_undolog_entry *undo;
+
+ undo = (gtm_undolog_entry *)
+ xmalloc (sizeof (struct gtm_undolog_entry) + len);
+ undo->addr = (void *) ptr;
+ undo->len = len;
+
+ tx->undolog.push()[0] = undo;
+
+ memcpy (undo->saved, ptr, len);
+}
+
+} // namespace GTM
+
+using namespace GTM;
+
+void _ITM_LB (const void *ptr, size_t len) ITM_REGPARM
+ __attribute__((alias("GTM_LB")));
+
+#define ITM_LOG_DEF(T) \
+void ITM_REGPARM _ITM_L##T (const _ITM_TYPE_##T *ptr) \
+{ GTM_LB (ptr, sizeof (*ptr)); }
+
+ITM_LOG_DEF(U1)
+ITM_LOG_DEF(U2)
+ITM_LOG_DEF(U4)
+ITM_LOG_DEF(U8)
+ITM_LOG_DEF(F)
+ITM_LOG_DEF(D)
+ITM_LOG_DEF(E)
+ITM_LOG_DEF(CF)
+ITM_LOG_DEF(CD)
+ITM_LOG_DEF(CE)
===================================================================
@@ -0,0 +1,302 @@
+/* Copyright (C) 2008, 2009, 2011 Free Software Foundation, Inc.
+ Contributed by Richard Henderson <rth@redhat.com>.
+
+ This file is part of the GNU Transactional Memory Library (libitm).
+
+ Libitm is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* The following are internal implementation functions and definitions.
+ To distinguish them from those defined by the Intel ABI, they all
+ begin with GTM/gtm. */
+
+#ifndef LIBITM_I_H
+#define LIBITM_I_H 1
+
+#include "libitm.h"
+#include "config.h"
+
+#include <cassert>
+#include <cstdlib>
+#include <cstring>
+#include <unwind.h>
+#include <type_traits>
+
+#include "common.h"
+
+namespace GTM HIDDEN {
+
+using namespace std;
+
+// A helper template for accessing an unsigned integral of SIZE bytes.
+template<size_t SIZE> struct sized_integral { };
+template<> struct sized_integral<1> { typedef uint8_t type; };
+template<> struct sized_integral<2> { typedef uint16_t type; };
+template<> struct sized_integral<4> { typedef uint32_t type; };
+template<> struct sized_integral<8> { typedef uint64_t type; };
+
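+// An unsigned integer with the width of a machine word on the target.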
+typedef unsigned int gtm_word __attribute__((mode (word)));
+
+// These values are given to GTM_restart_transaction and indicate the
+// reason for the restart. The reason is used to decide what STM
+// implementation should be used during the next iteration.
+enum gtm_restart_reason
+{
+ RESTART_REALLOCATE,
+ RESTART_LOCKED_READ,
+ RESTART_LOCKED_WRITE,
+ RESTART_VALIDATE_READ,
+ RESTART_VALIDATE_WRITE,
+ RESTART_VALIDATE_COMMIT,
+ RESTART_SERIAL_IRR,
+ RESTART_NOT_READONLY,
+ RESTART_CLOSED_NESTING,
+ RESTART_INIT_METHOD_GROUP,
+ NUM_RESTARTS,
+ NO_RESTART = NUM_RESTARTS
+};
+
+} // namespace GTM
+
+#include "target.h"
+#include "rwlock.h"
+#include "aatree.h"
+#include "cacheline.h"
+#include "cachepage.h"
+#include "stmlock.h"
+#include "dispatch.h"
+#include "containers.h"
+
+namespace GTM HIDDEN {
+
+// This type is private to alloc.cc, but needs to be defined so that
+// the template used inside gtm_thread can instantiate.
+struct gtm_alloc_action
+{
+ void (*free_fn)(void *);
+ bool allocated;
+};
+
+// This type is private to local.cc.
+struct gtm_undolog_entry;
+
+struct gtm_thread;
+
+// A transaction checkpoint: data that has to be saved and restored when doing
+// closed nesting.
+struct gtm_transaction_cp
+{
+ gtm_jmpbuf jb;
+ size_t undolog_size;
+ aa_tree<uintptr_t, gtm_alloc_action> alloc_actions;
+ size_t user_actions_size;
+ _ITM_transactionId_t id;
+ uint32_t prop;
+ uint32_t cxa_catch_count;
+ void *cxa_unthrown;
+ // We might want to use a different but compatible dispatch method for
+ // a nested transaction.
+ abi_dispatch *disp;
+ // Nesting level of this checkpoint (1 means that this is a checkpoint of
+ // the outermost transaction).
+ uint32_t nesting;
+
+ void save(gtm_thread* tx);
+ void commit(gtm_thread* tx);
+};
+
+// Contains all thread-specific data required by the entire library.
+// This includes all data relevant to a single transaction. Because most
+// thread-specific data is about the current transaction, we also refer to
+// the transaction-specific parts of gtm_thread as "the transaction" (the
+// same applies to names of variables and arguments).
+// All but the shared part of this data structure are thread-local data.
+// gtm_thread could be split into transaction-specific structures and other
+// per-thread data (with those parts then nested in gtm_thread), but this
+// would make it harder to later rearrange individual members to optimize data
+// accesses.  Thus, for now we keep one flat object, and will only split it if
+// the code gets too messy.
+struct gtm_thread
+{
+
+ struct user_action
+ {
+ _ITM_userCommitFunction fn;
+ void *arg;
+ bool on_commit;
+ _ITM_transactionId_t resuming_id;
+ };
+
+ // The jump buffer by which GTM_longjmp restarts the transaction.
+ // This field *must* be at the beginning of the transaction.
+ gtm_jmpbuf jb;
+
+ // Data used by local.cc for the undo log for both local and shared memory.
+ vector<gtm_undolog_entry*> undolog;
+
+ // Data used by alloc.c for the malloc/free undo log.
+ aa_tree<uintptr_t, gtm_alloc_action> alloc_actions;
+
+ // Data used by useraction.c for the user-defined commit/abort handlers.
+ vector<user_action> user_actions;
+
+ // A numerical identifier for this transaction.
+ _ITM_transactionId_t id;
+
+ // The _ITM_codeProperties of this transaction as given by the compiler.
+ uint32_t prop;
+
+ // The nesting depth for subsequently started transactions. This variable
+ // will be set to 1 when starting an outermost transaction.
+ uint32_t nesting;
+
+ // Set if this transaction owns the serial write lock.
+ // Can be reset only when restarting the outermost transaction.
+ static const uint32_t STATE_SERIAL = 0x0001;
+ // Set if the serial-irrevocable dispatch table is installed.
+ // Implies that no logging is being done, and abort is not possible.
+ // Can be reset only when restarting the outermost transaction.
+ static const uint32_t STATE_IRREVOCABLE = 0x0002;
+
+ // A bitmask of the above.
+ uint32_t state;
+
+ // In order to reduce cacheline contention on global_tid during
+ // beginTransaction, we allocate a block of 2**N ids to the thread
+ // all at once. This number is the next value to be allocated from
+ // the block, or 0 % 2**N if no such block is allocated.
+ _ITM_transactionId_t local_tid;
+
+ // Data used by eh_cpp.c for managing exceptions within the transaction.
+ uint32_t cxa_catch_count;
+ void *cxa_unthrown;
+ void *eh_in_flight;
+
+ // Checkpoints for closed nesting.
+ vector<gtm_transaction_cp> parent_txns;
+
+ // Data used by retry.c for deciding what STM implementation should
+ // be used for the next iteration of the transaction.
+ // Only restart_total is reset to zero when the transaction commits; the
+ // other counters are total values for all previously executed transactions.
+ uint32_t restart_reason[NUM_RESTARTS];
+ uint32_t restart_total;
+
+ // *** The shared part of gtm_thread starts here. ***
+ // Shared state is on separate cachelines to avoid false sharing with
+ // thread-local parts of gtm_thread.
+
+ // Points to the next thread in the list of all threads.
+ gtm_thread *next_thread __attribute__((__aligned__(HW_CACHELINE_SIZE)));
+
+ // If this transaction is inactive, shared_state is ~0.  Otherwise, this is
+ // an active or serial transaction.
+ gtm_word shared_state;
+
+ // The lock that provides access to serial mode. Non-serialized
+ // transactions acquire read locks; a serialized transaction acquires
+ // a write lock.
+ static gtm_rwlock serial_lock;
+
+ // The head of the list of all threads' transactions.
+ static gtm_thread *list_of_threads;
+ // The number of all registered threads.
+ static unsigned number_of_threads;
+
+ // In alloc.cc
+ void commit_allocations (bool, aa_tree<uintptr_t, gtm_alloc_action>*);
+ void record_allocation (void *, void (*)(void *));
+ void forget_allocation (void *, void (*)(void *));
+ void drop_references_allocations (const void *ptr)
+ {
+ this->alloc_actions.erase((uintptr_t) ptr);
+ }
+
+ // In beginend.cc
+ void rollback (gtm_transaction_cp *cp = 0, bool aborting = false);
+ bool trycommit ();
+ void restart (gtm_restart_reason) ITM_NORETURN;
+
+ gtm_thread();
+ ~gtm_thread();
+
+ static void *operator new(size_t);
+ static void operator delete(void *);
+
+ // Invoked from assembly language, thus the "asm" specifier on
+ // the name, avoiding complex name mangling.
+ static uint32_t begin_transaction(uint32_t, const gtm_jmpbuf *)
+ __asm__("GTM_begin_transaction") ITM_REGPARM;
+
+ // In eh_cpp.cc
+ void revert_cpp_exceptions (gtm_transaction_cp *cp = 0);
+
+ // In local.cc
+ void commit_undolog (void);
+ void rollback_undolog (size_t until_size = 0);
+ void drop_references_undolog (const void *, size_t);
+
+ // In retry.cc
+ // Must be called outside of transactions (i.e., after rollback).
+ void decide_retry_strategy (gtm_restart_reason);
+ abi_dispatch* decide_begin_dispatch (uint32_t prop);
+ void number_of_threads_changed(unsigned previous, unsigned now);
+ // Must be called from serial mode. Does not call set_abi_disp().
+ void set_default_dispatch(abi_dispatch* disp);
+
+ // In method-serial.cc
+ void serialirr_mode ();
+
+ // In useraction.cc
+ void rollback_user_actions (size_t until_size = 0);
+ void commit_user_actions ();
+};
+
+} // namespace GTM
+
+#include "tls.h"
+
+namespace GTM HIDDEN {
+
+// An unscaled count of the number of times we should spin attempting to
+// acquire locks before we block the current thread and defer to the OS.
+// This variable isn't used when the standard POSIX lock implementations
+// are used.
+extern uint64_t gtm_spin_count_var;
+
+extern "C" uint32_t GTM_longjmp (const gtm_jmpbuf *, uint32_t, uint32_t)
+ ITM_NORETURN ITM_REGPARM;
+
+extern "C" void GTM_LB (const void *, size_t) ITM_REGPARM;
+
+extern void GTM_error (const char *fmt, ...)
+ __attribute__((format (printf, 1, 2)));
+extern void GTM_fatal (const char *fmt, ...)
+ __attribute__((noreturn, format (printf, 1, 2)));
+
+extern abi_dispatch *dispatch_serial();
+extern abi_dispatch *dispatch_serialirr();
+extern abi_dispatch *dispatch_serialirr_onwrite();
+extern abi_dispatch *dispatch_gl_wt();
+
+extern gtm_cacheline_mask gtm_mask_stack(gtm_cacheline *, gtm_cacheline_mask);
+
+} // namespace GTM
+
+#endif // LIBITM_I_H
===================================================================
@@ -0,0 +1,63 @@
+/* Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+ Contributed by Richard Henderson <rth@redhat.com>.
+
+ This file is part of the GNU Transactional Memory Library (libitm).
+
+ Libitm is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* The following are internal implementation functions and definitions.
+ To distinguish them from those defined by the Intel ABI, they all
+ begin with GTM/gtm. */
+
+#ifndef COMMON_H
+#define COMMON_H 1
+
+#define UNUSED __attribute__((unused))
+#define ALWAYS_INLINE __attribute__((always_inline))
+#ifdef HAVE_ATTRIBUTE_VISIBILITY
+# define HIDDEN __attribute__((visibility("hidden")))
+#else
+# define HIDDEN
+#endif
+
+#define likely(X) __builtin_expect((X) != 0, 1)
+#define unlikely(X) __builtin_expect((X), 0)
+
+namespace GTM HIDDEN {
+
+// Locally defined protected allocation functions.
+//
+// To avoid dependency on libstdc++ new/delete, as well as to not
+// interfere with the wrapping of the global new/delete we wrap for
+// the user in alloc_cpp.cc, use class-local versions that defer
+// to malloc/free. Recall that operator new/delete does not go through
+// normal lookup and so we cannot simply inject a version into the
+// GTM namespace.
+// If separate_cl is true, the allocator will try to return memory that is on
+// cache lines that are not shared with any object used by another thread.
+extern void * xmalloc (size_t s, bool separate_cl = false)
+ __attribute__((malloc, nothrow));
+extern void * xrealloc (void *p, size_t s, bool separate_cl = false)
+ __attribute__((malloc, nothrow));
+
+} // namespace GTM