stamp-h1
# External library (without trailing slash to allow symlinks):
-/libatomic_ops*
/pthreads-w32*
# These files are generated by autoreconf:
--- /dev/null
+# Git repo attributes.
+
+# Ensure all text files have normalized (LF) line endings in the repository.
+* text=auto
+
+# Note: "core.eol" configuration variable controls which line endings to use
+# for the normalized files in the working directory (the default is native).
--- /dev/null
+# Ignored files in libatomic_ops Git repo.
+
+Makefile
+
+/pkgconfig/atomic_ops.pc
+/pkgconfig/atomic_ops-uninstalled.pc
+/autom4te.cache/
+/config.cache
+/config.log
+/config.status
+/libatomic_ops-*
+
+*.a
+*.dll
+*.exe
+*.gcda
+*.gch
+*.gcno
+*.la
+*.lib
+*.lo
+*.o
+*.obj
+*.so
+
+/src/.deps/
+/src/.dirstamp
+/src/.libs/
+/src/config.h
+/src/config.h.in~
+/src/stamp-h1
+
+/tests/.deps/
+/tests/.dirstamp
+/tests/.libs/
+/tests/core
+/tests/list_atomic.i
+/tests/test_atomic
+/tests/test_atomic_pthreads
+/tests/test_malloc
+/tests/test_stack
+
+# External library (without trailing slash to allow symlinks):
+/pthreads-w32*
+
+# These files are generated by autoreconf:
+/aclocal.m4
+/compile
+/config.guess
+/config.sub
+/configure
+/depcomp
+/install-sh
+/missing
+/mkinstalldirs
+/src/config.h.in
+/test-driver
+Makefile.in
+
+# Generated by libtoolize:
+/libtool
+/ltmain.sh
+/m4/*.m4
+
+# These files are generated by make check:
+/tests/list_atomic.c
+/tests/test_atomic_include.h
+/tests/test*.log
+/tests/test*.trs
--- /dev/null
+Originally written by Hans Boehm, with some platform-dependent code
+imported from the Boehm-Demers-Weiser GC, where it was contributed
+by many others.
+Currently maintained by Ivan Maidanski.
+
+Andreas Tobler <a.tobler@schweiz.org>
+Andrew Agno <agno+boehmgc@agno.net>
+Bradley Smith <brad@brad-smith.co.uk>
+Bruce Mitchener <bruce.mitchener@gmail.com>
+Carlos O'Donell <carlos@baldric.uwo.ca>
+Daniel Grayson <dan@math.uiuc.edu>
+Doug Lea <dl@cs.oswego.edu>
+Earl Chew <earl_chew@agilent.com>
+Emmanuel Stapf <manus@eiffel.com>
+Gilles Talis <gilles.talis@gmail.com>
+Gregory Farnum <gregory.farnum@dreamhost.com>
+H.J. Lu <hjl.tools@gmail.com>
+Hans Boehm <boehm@acm.org>
+Hans-Peter Nilsson <hp@gcc.gnu.org>
+Ian Wienand <ianw@gelato.unsw.edu.au>
+Ivan Maidanski <ivmai@mail.ru>
+James Cowgill <james410@cowgill.org.uk>
+Jeremy Huddleston <jeremyhu@apple.com>
+Jim Marshall <jim.marshall@wbemsolutions.com>
+Joerg Wagner <wagner@it.neclab.eu>
+Linas Vepstas <linasvepstas@gmail.com>
+Luca Barbato <lu_zero@gentoo.org>
+Kochin Chang <kochinc@outlook.com>
+Maged Michael <michael@cs.rochester.edu>
+Manuel Serrano <Manuel.Serrano@inria.fr>
+Michael Hope <michael.hope@linaro.org>
+Patrick Marlier <patrick.marlier@unine.ch>
+Pavel Raiskup <praiskup@redhat.com>
+Petter Urkedal <urkedal@nbi.dk>
+Philipp Zambelli <pzamb@iicm.edu>
+Ranko Zivojnovic <ranko@spidernet.net>
+Roger Hoover <roger.hoover@gmail.com>
+Sebastian Siewior <sebastian@breakpoint.cc>
+Steve Capper <steve.capper@linaro.org>
+Takashi Yoshii <takashi.yoshii.zj@renesas.com>
+Thiemo Seufer <ica2_ts@csv.ica.uni-stuttgart.de>
+Thorsten Glaser <tg@debian.org>
+Tony Mantler <nicoya@ubb.ca>
+Yvan Roux <yvan.roux@linaro.org>
--- /dev/null
+ GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+ 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+\f
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+\f
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+\f
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+\f
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+\f
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ <signature of Ty Coon>, 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Library General
+Public License instead of this License.
--- /dev/null
+
+== [7.5.0] (development) ==
+
+* Relax shareability domain for dmb st in AO_nop_write (ARM/AArch64).
+* Use LLD and SCD instructions on mips64.
+
+
+== [7.4.4] (unset) ==
+
+* Eliminate 'variable set but not used' Cppcheck warnings in test_stack.
+* Fix makefile preventing AO_pause undefined in libatomic_ops_gpl.
+* Fix missing casts to match printf format specifier in test_atomic.
+* Fix missing output folder on making auto-generated test files (Automake).
+* Minor fix of code alignment in mips AO_compare_and_swap.
+* Restore contribution info in ChangeLog for authors not listed in git log.
+
+
+== [7.4.2] 2014-05-02 ==
+
+* Fix a typo in doc/README.txt (remove redundant "an" article).
+* Update emails/links due to project site transition.
+
+
+== [7.4.0] 2013-11-17 ==
+
+* Add and/or/xor entries to list_atomic (tests).
+* Add char/short/int/AO_double_t and dd_acquire cases to list_atomic (tests).
+* Add compile-time assertion for size of 'standard' AO_double_t.
+* Add double_store pthread-based implementation and tests.
+* Add generalized CAS primitives of char/short/int size.
+* Add generalized atomic and/or/xor operations for char/short/int types.
+* Add generalized fetch_and_add_acquire/release (for ARMv6+).
+* Add generic implementation of double_load primitives.
+* Add information about AO_ASSUME_VISTA to README_win32.
+* Add internal header containing only char/short/int/AO_t atomic loads.
+* Add load/store primitives generalization based on CAS.
+* Add lock-based implementation of char/short/int_fetch_compare_and_swap.
+* Add makefile rule to test list_atomic.template syntax (tests).
+* Add missing 'const' in aligned-atomic XSIZE_load implementation.
+* Add missing double_compare_and_swap to generalization.
+* Add missing generalization of no-barrier CAS in template.
+* Add negative double-CAS test cases to test_atomic_include (tests).
+* Add test_stack to Makefile.msft (tests).
+* Adjust fprintf arguments type matching specifier in test_stack (tests).
+* Adjust included filenames in atomic_ops_malloc and test_stack.
+* Adjust quotes in echo command of Makefile.msft (Win32).
+* Always use 'mfence' for nop_full if target CPU supports SSE2 (gcc/x86).
+* Better document configure THREADDLLIBS variable.
+* Cast away volatile on dereference in CAS-based generalization primitives.
+* Change policy regarding version numbers ("micro" part instead of "alpha").
+* Convert README to Markdown format.
+* Define AO_NO_PTHREADS in configure if no pthreads (Win32 and VxWorks).
+* Define AO_int_X operations for ARM and avr32.
+* Define double-wide ordered loads/stores for x86.
+* Define int_and/or/xor primitives in ao_t_is_int header.
+* Define nop_full as compiler barrier for pre-ARMv6 single-core case.
+* Do not duplicate BUILT_SOURCES entries in nobase_private_HEADERS (Makefile).
+* Do not include standard_ao_double_t.h where double-CAS is unimplemented.
+* Do not report absence of meaningless nop, load and store in test_atomic.
+* Do not use deprecated AO_T and AO_TS_T (tests).
+* Eliminate 'missing initializer' warning for AO_stack_t value initializer.
+* Eliminate 64-bit compiler warnings in atomic_ops_malloc.
+* Eliminate arithmetic shifts in double-CAS (gcc/arm, msftc/x86).
+* Eliminate warning for fetch_and_add argument in test_atomic_include (tests).
+* Enable Makefile.msft for Win64.
+* Enable build using toolchain without pthreads.
+* Enable double_compare_and_swap for non-cpp code (msftc/x86.h).
+* Enable generalization of all variants of CAS via fetch_compare_and_swap.
+* Enable test_stack for pthreads-w32 and Win32 with native threads.
+* Fix generalized AO_char/short_compare_and_swap args (missing 'unsigned').
+* Fix makefile sed rule for list_atomic (tests).
+* Fix missing abort() usage in atomic_ops_malloc and tests on WinCE.
+* Generalize compare_double_and_swap_double using double_compare_and_swap.
+* Generalize double_load/store for x86_64 (GCC).
+* Generate ao_t_is_int, 'loadstore' headers from templates.
+* Generate generalized AO_t load/store/fetch_and_add primitives from template.
+* Generate ordered_loads/stores_only headers from templates.
+* Group all X_acquire_release_volatile.h and X_[aligned_]atomic_load_store.h.
+* Implement and/or/xor, AO_double_load for ARM.
+* Implement atomic store using direct write by default on ARMv6+.
+* Implement char/short/int-wide primitives using GCC built-in atomic/sync.
+* Implement char/short/int_fetch_and_add for msftc/x86[_64] (Win32).
+* Implement char/short_fetch_and_add, char/short_load for ARMv6+ (GCC).
+* Implement char/short_store primitives at aligned addresses for ARM.
+* Implement compare_double_and_swap_double for SunCC/x86.
+* Implement double_load/store based on guaranteed x86 access atomicity.
+* Implement double_store for ARMv7 using LDREXD/STREXD.
+* Implement load/store via simple LDR/STR for ARMv6+ (msftc).
+* Implement nop_full/write using 'dmb' instruction if available (gcc/arm).
+* Improve debug printing in test_stack (tests).
+* Log messages to stdout instead of stderr (tests).
+* Make AO_ASSUME_VISTA also enable Win98 code in msftc/x86.h (Win32).
+* Minimize gcc/generic-arithm template by factoring out barriers.
+* Move 'unsigned' keyword to XCTYPE in generalize-small template.
+* Move default compiler options to CFLAGS in Makefile.msft (Win32).
+* Move definitions of ordered loads/stores to inner separate headers.
+* Move gcc-generic AO_t-wide primitives to generic-small/arithm headers.
+* Move generalized arithmetical primitives to 'generalize-arithm' template.
+* Optimize AO_spin manually to minimize compiler influence on its duration.
+* Parameterize list_atomic template with XSIZE (tests).
+* Perform only few list reversals in test_malloc if AO based on pthreads.
+* Put autogen.sh to 'dist' package (Automake).
+* Remove duplicate definition of test_and_set_acquire in generalize.h.
+* Remove X_aligned_atomic_load_store headers and template.
+* Remove duplicate AO_spin and AO_pause definition in atomic_ops_stack.
+* Remove gcc/x86_64.h eliminating code duplication of gcc/x86.h.
+* Remove nested AO_USE_PTHREAD_DEFS macro check in atomic_ops.h (gcc/arm).
+* Remove redundant 'cc' clobber for LDREXD instruction (gcc/arm).
+* Remove store_full from msftc/arm.h in favor of generalized primitive.
+* Remove sunc/x86_64.h eliminating code duplication of sunc/x86.h.
+* Remove unsafe emulation-based implementation of double CAS (SunCC/x86_64).
+* Remove useless 'perror' call in run_parallel.h (tests).
+* Reorder AO_double_t union elements for AO_DOUBLE_T_INITIALIZER portability.
+* Replace atomic_load_store.template with atomic_load and atomic_store ones.
+* Replace some FIXME items with TODO in atomic_ops.c and sysdeps headers.
+* Specify fetch_and_add/sub1 result as unused in test_atomic (tests).
+* Support AArch64 (64-bit ARM) target (GCC).
+* Support ARMv8 target (gcc/arm).
+* Test double_compare_and_swap in test_atomic (tests).
+* Use AO_ prefix for internal functions in arm_v6.h, hppa.h.
+* Use __atomic GCC built-in to implement generic double-wide CAS.
+* Use built-in __sync CAS for double-CAS if AO_USE_SYNC_CAS_BUILTIN for x86.
+* Workaround GCC 4.4.3 warning reported for 'val' of list_atomic.c (tests).
+
+
+== [7.3alpha2] 2012-05-11 ==
+
+* Add '-no-undefined' to LDFLAGS in src/Makefile.am.
+* Add AO_and, AO_xor atomic operations.
+* Add AO_fetch_compare_and_swap primitives.
+* Add and fill in AUTHORS, TODO files.
+* Add autogen.sh file.
+* Adjust AO_..._H macros in public headers.
+* Code refactoring of gcc/arm.h by introducing AO_ARM_HAVE_x macros.
+* Define AO macros for libatomic_ops version identification.
+* Do not define NDEBUG if '--enable-assertions' passed to configure.
+* Eliminate compiler warnings in various functions and macros.
+* Generalize AO_compare_and_swap primitives via AO_fetch_compare_and_swap.
+* Generalize acquire/release/full CAS primitives for MIPS.
+* Implement fetch_and_add, test_and_set primitives for MIPS.
+* Improve Makefile for MS VC++; pass '-W3' option to MS compiler.
+* Include ao_t_is_int.h from atomic_ops.h after first generalization pass.
+* Merge all Makefile.am files in src tree.
+* Minor code refactoring of atomic_ops.c, generic_pthread.h.
+* Minor configure build improvements (e.g., ensure proper autoconf version).
+* Place only major per-release changes description to ChangeLog (this file).
+* Recognize AO_PREFER_GENERALIZED macro to favor generalization over assembly.
+* Remove all auto-generated files except for generalize-small.h from the repo.
+* Remove duplicate doc/COPYING and empty NEWS files.
+* Replace atomic_ops_malloc static mmap-related empty functions with macros.
+* Replace pointer relational comparisons with non-pointer ones.
+* Require autoconf 2.61 instead of v2.64.
+* Show extra compiler warnings (GCC only).
+* Turn off AO primitives inlining if AO_NO_INLINE defined.
+* Use __builtin_expect in CAS failure loop condition checks (GCC only).
+
+
+== [7.2g] (unset) ==
+
+* Remove inclusion of acquire_release_volatile.h on MIPS.
+
+
+== [7.2f] 2014-05-02 ==
+
+* Fix a typo in doc/README.txt (remove redundant "an" article).
+* Regenerate configure files by new automake (v1.14.1), libtool (v2.4.2.418).
+
+
+== [7.2e] 2013-11-10 ==
+
+* Fix (remove) invalid include of read_ordered.h for ARM.
+* Fix AM_CONFIG_HEADER in configure for autoconf-2.69-1.
+* Fix AO_pause sleep delay for particular argument values (Win32).
+* Fix ARMv7 LDREXD/STREXD double-wide operand specification (GCC/Clang).
+* Fix LDREXD/STREXD use for pre-Clang3.3/arm.
+* Fix README regarding _acquire_read barrier.
+* Fix XSIZE_load/store definition order in generalize-small template.
+* Fix asm constraint of CAS memory operand for gcc/alpha, clang-3.1/mips.
+* Fix asm constraints of primitives in sunc/x86.h.
+* Fix cmpxchg16b-based compare_double_and_swap_double for SunCC/x86_64.
+* Fix compare_double_and_swap_double and double_ptr_storage for gcc/x32.
+* Fix compare_double_and_swap_double for clang3.0/x86 in PIC mode.
+* Fix compare_double_and_swap_double_full definition condition in emul_cas.
+* Fix generalize-small template adding missed CAS-based fetch_and_add.
+* Fix generalized fetch_and_add function.
+* Fix missing compiler barrier in nop_full for uniprocessor ARM.
+* Fix ordered_except_wr header inclusion for s390.
+* Fix return type of AO_int_X primitives defined in ao_t_is_int header.
+* Fix return type of char/short/int_load_read() in read_ordered.h.
+* Fix template-based headers regeneration order in src/Makefile.
+* Fix typos in ao_t_is_int, atomic_ops.h, generalize.h, msftc/arm.h comments.
+* Fix variable type to match printf format specifier in test_stack.
+* Fix visibility and initial value of 'dummy' variable in atomic_ops_stack.
+* Terminate tests with abort after error reported.
+
+
+== [7.2d] 2012-08-09 ==
+
+* Fix AO_compare_double_and_swap_double_full for gcc-4.2.1/x86 in PIC mode.
+* Fix AO_compiler_barrier missing parentheses.
+* Fix missing 'unsigned' for generalized AO_char/short_fetch_and_add result.
+
+
+== [7.2] 2012-05-11 ==
+
+* Add atomic_ops.pc.in and atomic_ops-uninstalled.pc.in to pkgconfig folder.
+* Define and use AO_PTRDIFF_T in tests for casts between pointer and integer.
+* Fix AO_compare_and_swap return type for s390 and PowerPC.
+* Fix AO_compare_double_and_swap_double_full for gcc/x86 (PIC mode).
+* Fix AO_stack_push_release to workaround bug in clang-1.1/x86 compiler.
+* Fix AO_test_and_setXX in tests/list_atomic.template.
+* Fix AO_test_and_set_full (gcc/x86[_64].h) to work-around a bug in LLVM v2.7.
+* Fix AO_test_and_set_full on m68k.
+* Fix __ARM_ARCH_5__ macro handling for Android NDK (ARMv7).
+* Fix configure for Cygwin, mingw-w64/32.
+* Fix configure to define __PIC__ macro explicitly if needed (GCC).
+* Fix double_ptr_storage definition for GCC pre-v4 (x86_64).
+* Fix for x32 by removing 'q' suffix in x86-64 instructions.
+* Fix generalization for IA-64 (regarding AO_or, AO_..._read/write primitives)
+* Fix generalized AO_<type>_fetch_and_add() return type.
+* Fix test_atomic_include for the case of missing CAS primitive.
+* Fix test_malloc - allocate less memory in case of missing mmap.
+* Implement the basic atomic primitives for the hexagon CPU.
+
+
+== [7.2alpha6] 2011-06-14 ==
+
+* Add missing AO_HAVE_ macros.
+* Add support of avr32 CPU.
+* Better support of various models of ARM.
+* Disable AO_compare_double_and_swap_double_full for SunCC x86 as not working.
+* Enable ARM Thumb-2 mode.
+* Fix AO_test_and_set_full for SunCC (x86).
+* Fix bugs in tests.
+* Fix clobbers in AO_compare_and_swap_full (x86.h).
+* Fix typos in identifiers and comments.
+* Improve AO_sync for PowerPC.
+* Improve make scripts (configure.ac).
+* Make get_mmaped() in atomic_ops_malloc.c more portable.
+* Support Intel compiler.
+* Support NaCl target.
+* Suppress compiler warnings in various places.
+* Test more predefined macros (ARM, PowerPC).
+* Use assembly code only for MS VC if available (x86_64).
+* Use built-in __sync_bool_compare_and_swap if available (x86_64).
+* Workaround bugs in LLVM GCC and SunCC regarding XCHG (x86, x86_64).
+
+
+== [7.2alpha4] 2009-12-02 ==
+
+* Fix typos in comments, identifiers and documentation.
+* Implement AO_compare_and_swap_full for SPARC.
+* Refine ARM-specific code.
+* Refine code and comments for MS VC.
+* Regenerate make scripts.
+* Share common code for all 32-bit CPUs (MS VC).
+* Support DigitalMars and Watcom compilers.
+* Support MS VC for ARM (WinCE).
+* Support SH CPU.
+* Support win32-pthreads.
+* Support x86 and x86_64 for SunCC compiler.
+
+
+== [7.2alpha2] 2009-05-27 ==
+
+* Add MIPS support.
+* Add better support for m68k.
+* Add "const" to first parameter of load calls.
+* Add parentheses around address argument for various macros.
+* Add some platform-specific documentation to INSTALL.
+* Add untested 64-bit support for PowerPC.
+* Fix AO_compare_and_swap_double_acquire.
+* Fix AO_int_fetch_and_add_full (x86_64).
+* Fix comments.
+* Fix s390 include paths.
+* Fix use of lwz instruction (PowerPC).
+* Refine clobbers (PowerPC).
+* Remove outdated info about Windows support in README.
+* Replace K&R-style function definition with ANSI C one.
+Add AO_compare_double_and_swap_double for ARMv6.
+* gcc/powerpc.h: Consider __NO_LWSYNC__.
+
+
+== [7.1] 2008-02-11 ==
+
+* Add test_and_set, AO_double_compare_and_swap generalizations.
+* Conditionally add compare_double_and_swap_double (x86).
+* Conditionally add compare_double_and_swap_double (x86).
+* Fix AO_compare_double_and_swap_double_full (x86) for PIC mode.
+* Fix AO_load_acquire for PowerPC.
+* Fix double-width CAS (x86).
+* Refine README (add more warnings about data dependencies).
+* Refine double_ptr_storage type definition.
+* Support ARMv6+ in GCC.
+* Support ArmCC compiler.
+* Use _InterlockedExchangeAdd for MS VC (x86).
+
+
+== [7.0] 2007-06-28 ==
+
+* Add 64-bit version of AO_load_acquire for PowerPC (by Luca Barbato).
+* Add support of x86 and x86_64 for MS VC.
+* Do not assume that "mfence" is always present (x86.h).
+* Fix ARM AO_test_and_set_full.
+* Include windows.h (MS VC).
+* Update README to reflect C++0x effort.
+
+
+== [1.2] 2006-07-11 ==
+
+* Add prototypes to suppress compiler warnings.
+* Add simple VxWorks support.
+* Fix InterlockedCompareExchange proto usage.
+* Fix typos (ia64).
+* Include all_acquire_release_volatile.h and all_atomic_load_store.h (ia64).
+* Initial support for 64-bit targets.
+* Use "=q" for AO_test_and_set_full (x86).
+* Use inline assembler to generate "mfence" and byte sized XCHG.
+* Use new intrinsics available in MSVC 2003 and MSVC 2005.
+
+
+== [1.1] 2005-09-27 ==
+
+* Add and use read_ordered.h.
+* Change function naming from "byte" to "char".
+* Fix AO_test_and_set for ARM; define AO_CAN_EMUL_CAS.
+
+
+== [1.0] 2005-03-21 ==
+
+* Add atomic_ops primitives for different sized data.
+* Add compare_double_and_swap_double and compare_and_swap_double.
+* Add gcc/cris.h (originally comes from Hans-Peter Nilsson).
+* Add gcc/m68k.h (contributed by Tony Mantler).
+* Add gcc/powerpc.h (with help of Maged Michael, Doug Lea, Roger Hoover).
+* Add initial support for atomic_ops for VC++/Windows/X86 and HP/UX.
+* Add minimal support for the Sun SPARC compiler.
+* Add support for platforms that require out-of-line assembly code.
+* Add support of int-wide operations on platforms with int-sized pointers.
+* Added libatomic_ops_gpl library with support for lock-free stack and malloc.
+* Change atomic_ops include file structure.
+* Change most platforms to use byte-wide test-and-set locations.
+* Define AO_CLEAR, __ldcw[_align] macros in gcc/hppa.h (by Carlos O'Donell).
+* Fix various bugs.
+* Install under "atomic_ops" instead of "ao".
+* Remove compiler_barrier workaround for gcc 3.4+.
+* Renamed various types to end in _t.
+* Replace AO_HAVE_NOP_FULL with AO_HAVE_nop_full (by Ranko Zivojnovic).
+* Use autoconf, automake.
--- /dev/null
+SUBDIRS = src doc tests
+
+ACLOCAL_AMFLAGS = -I m4
+
+pkgconfigdir = $(libdir)/pkgconfig
+pkgconfig_DATA = pkgconfig/atomic_ops.pc
+noinst_DATA = pkgconfig/atomic_ops-uninstalled.pc
+
+dist_pkgdata_DATA = COPYING README.md
+
+EXTRA_DIST = autogen.sh
+
+#distclean-local:
--- /dev/null
+# The atomic_ops library (libatomic_ops)
+
+This is version 7.5.0 of libatomic_ops.
+
+You might find a more recent version
+[here](http://www.hboehm.info/gc/), or
+[here](https://github.com/ivmai/libatomic_ops).
+
+
+## Overview
+
+This package provides semi-portable access to hardware-provided
+atomic memory update operations on a number of architectures. These might
+allow you to write code:
+
+* That does more interesting things in signal handlers.
+
+* Makes more effective use of multiprocessors by allowing you to write
+ clever lock-free code. Note that such code is very difficult to get
+ right, and will unavoidably be less portable than lock-based code. It
+ is also not always faster than lock-based code. But it may occasionally
+ be a large performance win.
+
+* To experiment with new and much better thread programming paradigms, etc.
+
+For details and licensing restrictions see the files in the "doc"
+subdirectory.
+
+Please address bug reports to [mailing list](mailto:bdwgc@lists.opendylan.org).
+
+
+## Installation and Usage
+
+The configuration and build scripts for this package were generated by
+Automake/Autoconf. "./configure; make; sudo make install" in this
+directory should work. For a more customized build, see the output of
+"./configure --help".
+
+Note that much of the content of this library is in the header files.
+However, two small libraries are built and installed:
+
+* libatomic_ops.a is a support library, which is not needed on some platforms.
+ This is intended to be usable, under some mild restrictions, in free or
+ proprietary code, as are all the header files. See doc/LICENSING.txt.
+
+* libatomic_ops_gpl.a contains some higher level facilities. This code is
+ currently covered by the GPL. The contents currently correspond to
+ the headers atomic_ops_stack.h and atomic_ops_malloc.h.
+
+
+## Platform Specific Notes
+
+Win32/64: src/Makefile.msft contains a very simple Makefile for building
+and running tests and building the gpl library. The core atomic_ops
+implementation is entirely in header files.
+
+HP-UX/PA-RISC: aCC -Ae won't work as a C compiler, since it doesn't support
+inline assembly code. Use cc.
+
+
+## Copyright & Warranty
+
+See doc/LICENSING.txt file.
--- /dev/null
+== TODO tasks ==
+
+Add C++0x ATM (atomic memory operations) layer.
+
+
+== FIXME tasks ==
+
+RHELinux6/POWER7 (gcc-4.4.7-3/ppc64), Fedora16/POWER7 (gcc-4.6.2-1/ppc64),
+Debian/powerpc (gcc 4.6.3-7):
+test_stack failed (Debian Bug #680100).
+
+Debian/m68k (Linux 3.2.0-2-atari):
+test_stack failed (Bus error), regression (Debian Bug #680066).
--- /dev/null
+#!/bin/sh
+set -e
+
+# This script creates (or regenerates) configure (as well as aclocal.m4,
+# config.h.in, Makefile.in, etc.) missing in the source repository.
+
+autoreconf -i
+
+echo
+echo "Ready to run './configure'."
--- /dev/null
+# Process this file with autoconf to produce a configure script.
+AC_INIT([libatomic_ops],[7.5.0],bdwgc@lists.opendylan.org)
+
+AC_PREREQ(2.61)
+AC_CANONICAL_TARGET([])
+AC_CONFIG_SRCDIR(src/atomic_ops.c)
+AC_CONFIG_MACRO_DIR([m4])
+AM_INIT_AUTOMAKE([foreign dist-bzip2 nostdinc])
+AM_MAINTAINER_MODE
+
+AC_CONFIG_HEADERS([src/config.h])
+
+# Checks for programs.
+AM_PROG_CC_C_O
+AM_PROG_AS
+LT_INIT([disable-shared])
+
+# Checks for functions.
+AC_FUNC_MMAP
+
+# Determine PIC flag.
+need_asm=false
+PICFLAG=
+AC_MSG_CHECKING(for PIC compiler flag)
+if test "$GCC" = yes; then
+ case "$host" in
+ *-*-cygwin* | *-*-mingw*)
+ # Cygwin and Mingw[-w32/64] do not need -fPIC.
+ AC_MSG_RESULT("<none>")
+ ;;
+ *)
+ AC_MSG_RESULT(-fPIC)
+ PICFLAG=-fPIC
+ AC_MSG_CHECKING(whether gcc -fPIC causes __PIC__ definition)
+ # Workaround: at least GCC 3.4.6 (Solaris) does not define this macro.
+ old_CFLAGS="$CFLAGS"
+ CFLAGS="$PICFLAG $CFLAGS"
+ AC_TRY_COMPILE([],[
+ #ifndef __PIC__
+ # error
+ #endif
+ ], [ac_cv_pic_macro=yes], [ac_cv_pic_macro=no])
+ CFLAGS="$old_CFLAGS"
+ AC_MSG_RESULT($ac_cv_pic_macro)
+ AS_IF([test "$ac_cv_pic_macro" = yes], [],
+ [PICFLAG="-D__PIC__=1 $PICFLAG"])
+ ;;
+ esac
+
+ # Output all warnings.
+ AC_MSG_CHECKING(for gcc -Wextra)
+ old_CFLAGS="$CFLAGS"
+ CFLAGS="-Wextra $CFLAGS"
+ AC_TRY_COMPILE([],[], [ac_cv_cc_wextra=yes], [ac_cv_cc_wextra=no])
+ CFLAGS="$old_CFLAGS"
+ AC_MSG_RESULT($ac_cv_cc_wextra)
+ AS_IF([test "$ac_cv_cc_wextra" = yes], [WEXTRA="-Wextra"], [WEXTRA="-W"])
+ CFLAGS="-Wall $WEXTRA $CFLAGS"
+else
+ case "$host" in
+ *-*-hpux*)
+ AC_MSG_RESULT("+Z")
+ PICFLAG="+Z"
+ CFLAGS="+O2 -mt $CFLAGS"
+ ;;
+ *-*-solaris*)
+ AC_MSG_RESULT(-Kpic)
+ PICFLAG=-Kpic
+ CFLAGS="-O $CFLAGS"
+ need_asm=true
+ ;;
+ *-*-linux*)
+ AC_MSG_RESULT(-fPIC)
+ PICFLAG=-fPIC
+ # Any Linux compiler had better be gcc compatible.
+ ;;
+ *)
+ AC_MSG_RESULT("<none>")
+ ;;
+ esac
+fi
+
+AC_ARG_ENABLE(assertions,
+ [AC_HELP_STRING([--enable-assertions], [Assertion checking])])
+if test "$enable_assertions" != yes; then
+ AC_DEFINE([NDEBUG], 1, [Define to disable assertion checking.])
+fi
+
+AC_SUBST(PICFLAG)
+AC_SUBST(DEFS)
+
+AH_TEMPLATE([_PTHREADS], [Indicates the use of pthreads (NetBSD).])
+
+AH_TEMPLATE([AO_USE_NANOSLEEP],
+ [Use nanosleep() instead of select() (only if atomic operations \
+ are emulated)])
+AH_TEMPLATE([AO_USE_NO_SIGNALS],
+ [Do not block signals in compare_and_swap (only if atomic operations \
+ are emulated)])
+AH_TEMPLATE([AO_USE_WIN32_PTHREADS],
+ [Use Win32 Sleep() instead of select() (only if atomic operations \
+ are emulated)])
+AH_TEMPLATE([AO_TRACE_MALLOC], [Trace AO_malloc/free calls (for debug only)])
+
+# These macros are tested in public headers
+AH_TEMPLATE([AO_GENERALIZE_ASM_BOOL_CAS],
+ [Force compare_and_swap definition via fetch_compare_and_swap])
+AH_TEMPLATE([AO_PREFER_GENERALIZED],
+ [Prefer generalized definitions to direct assembly-based ones])
+AH_TEMPLATE([AO_USE_PTHREAD_DEFS],
+ [Emulate atomic operations via slow and async-signal-unsafe \
+ pthread locking])
+AH_TEMPLATE([AO_ASM_X64_AVAILABLE],
+            [Inline assembly available (only VC/x86_64)])
+AH_TEMPLATE([AO_ASSUME_VISTA],
+ [Assume Windows Server 2003, Vista or later target (only VC/x86)])
+AH_TEMPLATE([AO_ASSUME_WINDOWS98],
+ [Assume hardware compare-and-swap functionality available \
+ on target (only VC/x86)])
+AH_TEMPLATE([AO_CMPXCHG16B_AVAILABLE],
+ [Assume target is not old AMD Opteron chip (only x86_64)])
+AH_TEMPLATE([AO_FORCE_USE_SWP],
+ [Force test_and_set to use SWP instruction instead of LDREX/STREX \
+ (only arm v6+)])
+AH_TEMPLATE([AO_NO_SPARC_V9], [Assume target is not sparc v9+ (only sparc)])
+AH_TEMPLATE([AO_OLD_STYLE_INTERLOCKED_COMPARE_EXCHANGE],
+ [Assume ancient MS VS Win32 headers (only VC/arm v6+, VC/x86)])
+AH_TEMPLATE([AO_UNIPROCESSOR], [Assume single-core target (only arm v6+)])
+AH_TEMPLATE([AO_USE_INTERLOCKED_INTRINSICS],
+ [Assume Win32 _Interlocked primitives available as intrinsics \
+ (only VC/arm)])
+AH_TEMPLATE([AO_USE_PENTIUM4_INSTRS],
+ [Use Pentium 4 'mfence' instruction (only x86)])
+AH_TEMPLATE([AO_USE_SYNC_CAS_BUILTIN],
+ [Prefer GCC built-in CAS intrinsics in favor of inline assembly \
+ (only gcc/x86, gcc/x86_64)])
+AH_TEMPLATE([AO_WEAK_DOUBLE_CAS_EMULATION],
+ [Emulate double-width CAS via pthread locking in case of no hardware \
+ support (only gcc/x86_64, the emulation is unsafe)])
+
+AC_DEFINE(_REENTRANT, 1, [Required define if using POSIX threads.])
+
+# Libraries needed to support threads (if any).
+have_pthreads=false
+AC_CHECK_LIB(pthread, pthread_self, have_pthreads=true)
+if test x$have_pthreads = xtrue; then
+ THREADDLLIBS=-lpthread
+ case "$host" in
+ *-*-netbsd*)
+ # Indicates the use of pthreads.
+ AC_DEFINE(_PTHREADS)
+ ;;
+ *-*-openbsd* | *-*-kfreebsd*-gnu | *-*-dgux*)
+ THREADDLLIBS=-pthread
+ ;;
+ *-*-cygwin* | *-*-darwin*)
+ # Cygwin does not have a real libpthread, so Libtool cannot link
+ # against it.
+ THREADDLLIBS=
+ ;;
+ *-*-mingw*)
+ # Use Win32 threads for tests anyway.
+ THREADDLLIBS=
+ # Skip test_atomic_pthreads.
+ have_pthreads=false
+ ;;
+ esac
+else
+ AC_DEFINE([AO_NO_PTHREADS], 1, [No pthreads library available])
+ # Assume VxWorks or Win32.
+ THREADDLLIBS=
+fi
+AC_SUBST(THREADDLLIBS)
+
+AM_CONDITIONAL(HAVE_PTHREAD_H, test x$have_pthreads = xtrue)
+AM_CONDITIONAL(NEED_ASM, test x$need_asm = xtrue)
+
+AC_CONFIG_FILES([
+ Makefile
+ doc/Makefile
+ src/Makefile
+ tests/Makefile
+ pkgconfig/atomic_ops.pc
+ pkgconfig/atomic_ops-uninstalled.pc ])
+
+AC_CONFIG_COMMANDS([default],[[]],[[
+PICFLAG="${PICFLAG}"
+CC="${CC}"
+DEFS="${DEFS}"
+]])
+AC_OUTPUT
--- /dev/null
+Our intent is to make it easy to use libatomic_ops, in
+both free and proprietary software. Hence most code that we expect to be
+linked into a client application is covered by an MIT-style license.
+
+A few library routines are covered by the GNU General Public License.
+These are put into a separate library, libatomic_ops_gpl.a .
+
+The low-level part of the library is mostly covered by the following
+license:
+
+----------------------------------------
+
+Copyright (c) ...
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+--------------------------------
+
+A few files in the sysdeps directory were inherited in part from the
+Boehm-Demers-Weiser conservative garbage collector, and are covered by
+its license, which is similar in spirit:
+
+--------------------------------
+
+Copyright (c) ...
+
+THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+
+Permission is hereby granted to use or copy this program
+for any purpose, provided the above notices are retained on all copies.
+Permission to modify the code and to distribute modified code is granted,
+provided the above notices are retained, and a notice that the code was
+modified is included with the above copyright notice.
+
+----------------------------------
+
+A few files are covered by the GNU General Public License. (See file
+"COPYING".) This applies only to test code, sample applications,
+and the libatomic_ops_gpl portion of the library.
+Thus libatomic_ops_gpl should generally not be linked into proprietary code.
+(This distinction was motivated by patent considerations.)
+
+It is possible that the license of the GPL pieces may be changed for
+future versions to make them more consistent with the rest of the package.
+If you submit patches, and have strong preferences about licensing, please
+express them.
--- /dev/null
+# installed documentation
+#
+dist_pkgdata_DATA=LICENSING.txt README.txt README_stack.txt \
+ README_malloc.txt README_win32.txt
--- /dev/null
+Usage:
+
+0) If possible, do this on a multiprocessor, especially if you are planning
+on modifying or enhancing the package.  It will work on a uniprocessor,
+but the tests are much more likely to pass even in the presence of serious problems.
+
+1) Type ./configure --prefix=<install dir>; make; make check
+in the directory containing unpacked source. The usual GNU build machinery
+is used, except that only static, but position-independent, libraries
+are normally built. On Windows, read README_win32.txt instead.
+
+2) Applications should include atomic_ops.h. Nearly all operations
+are implemented by header files included from it. It is sometimes
+necessary, and always recommended to also link against libatomic_ops.a.
+To use the almost non-blocking stack or malloc implementations,
+see the corresponding README files, and also link against libatomic_ops_gpl.a
+before linking against libatomic_ops.a.
+
+OVERVIEW:
+Atomic_ops.h defines a large collection of operations, each one of which is
+a combination of an (optional) atomic memory operation, and a memory barrier.
+Also defines associated feature-test macros to determine whether a particular
+operation is available on the current target hardware (either directly or
+by synthesis). This is an attempt to replace various existing files with
+similar goals, since they usually do not handle differences in memory
+barrier styles with sufficient generality.
+
+If this is included after defining AO_REQUIRE_CAS, then the package
+will make an attempt to emulate compare-and-swap in a way that (at least
+on Linux) should still be async-signal-safe. As a result, most other
+atomic operations will then be defined using the compare-and-swap
+emulation. This emulation is slow, since it needs to disable signals.
+And it needs to block in case of contention. If you care about performance
+on a platform that can't directly provide compare-and-swap, there are
+probably better alternatives. But this allows easy ports to some such
+platforms (e.g. PA_RISC). The option is ignored if compare-and-swap
+can be implemented directly.
+
+If atomic_ops.h is included after defining AO_USE_PTHREAD_DEFS, then all
+atomic operations will be emulated with pthread locking. This is NOT
+async-signal-safe. And it is slow. It is intended primarily for debugging
+of the atomic_ops package itself.
+
+Note that the implementation reflects our understanding of real processor
+behavior. This occasionally diverges from the documented behavior. (E.g.
+the documented X86 behavior seems to be weak enough that it is impractical
+to use. Current real implementations appear to be much better behaved.)
+We of course are in no position to guarantee that future processors
+(even HPs) will continue to behave this way, though we hope they will.
+
+This is a work in progress. Corrections/additions for other platforms are
+greatly appreciated. It passes rudimentary tests on X86, Itanium, and
+Alpha.
+
+OPERATIONS:
+
+Most operations operate on values of type AO_t, which are unsigned integers
+whose size matches that of pointers on the given architecture. Exceptions
+are:
+
+- AO_test_and_set operates on AO_TS_t, which is whatever size the hardware
+supports with good performance. In some cases this is the length of a cache
+line. In some cases it is a byte. In many cases it is equivalent to AO_t.
+
+- A few operations are implemented on smaller or larger size integers.
+Such operations are indicated by the appropriate prefix:
+
+AO_char_... Operates on unsigned char values.
+AO_short_... Operates on unsigned short values.
+AO_int_... Operates on unsigned int values.
+
+(Currently a very limited selection of these is implemented. We're
+working on it.)
+
+The defined operations are all of the form AO_[<size>_]<op><barrier>(<args>).
+
+The <op> component specifies an atomic memory operation. It may be
+one of the following, where the corresponding argument and result types
+are also specified:
+
+void nop()
+ No atomic operation. The barrier may still be useful.
+AO_t load(const volatile AO_t * addr)
+ Atomic load of *addr.
+void store(volatile AO_t * addr, AO_t new_val)
+ Atomically store new_val to *addr.
+AO_t fetch_and_add(volatile AO_t *addr, AO_t incr)
+ Atomically add incr to *addr, and return the original value of *addr.
+AO_t fetch_and_add1(volatile AO_t *addr)
+ Equivalent to AO_fetch_and_add(addr, 1).
+AO_t fetch_and_sub1(volatile AO_t *addr)
+ Equivalent to AO_fetch_and_add(addr, (AO_t)(-1)).
+void and(volatile AO_t *addr, AO_t value)
+ Atomically 'and' value into *addr.
+void or(volatile AO_t *addr, AO_t value)
+ Atomically 'or' value into *addr.
+void xor(volatile AO_t *addr, AO_t value)
+ Atomically 'xor' value into *addr.
+int compare_and_swap(volatile AO_t * addr, AO_t old_val, AO_t new_val)
+ Atomically compare *addr to old_val, and replace *addr by new_val
+ if the first comparison succeeds. Returns nonzero if the comparison
+ succeeded and *addr was updated.
+AO_t fetch_compare_and_swap(volatile AO_t * addr, AO_t old_val, AO_t new_val)
+ Atomically compare *addr to old_val, and replace *addr by new_val
+ if the first comparison succeeds; returns the original value of *addr.
+AO_TS_VAL_t test_and_set(volatile AO_TS_t * addr)
+ Atomically read the binary value at *addr, and set it. AO_TS_VAL_t
+ is an enumeration type which includes two values AO_TS_SET and
+ AO_TS_CLEAR. An AO_TS_t location is capable of holding an
+ AO_TS_VAL_t, but may be much larger, as dictated by hardware
+ constraints. Test_and_set logically sets the value to AO_TS_SET.
+ It may be reset to AO_TS_CLEAR with the AO_CLEAR(AO_TS_t *) macro.
+ AO_TS_t locations should be initialized to AO_TS_INITIALIZER.
+ The values of AO_TS_SET and AO_TS_CLEAR are hardware dependent.
+ (On PA-RISC, AO_TS_SET is zero!)
+
+Test_and_set is a more limited version of compare_and_swap. Its only
+advantage is that it is more easily implementable on some hardware. It
+should thus be used if only binary test-and-set functionality is needed.
+
+If available, we also provide compare_and_swap operations that operate
+on wider values. Since standard data types for double width values
+may not be available, these explicitly take pairs of arguments for the
+new and/or old value. Unfortunately, there are two common variants,
+neither of which can easily and efficiently emulate the other.
+The first performs a comparison against the entire value being replaced,
+while the second performs a double-width replacement, but only
+a single-width comparison:
+
+int compare_double_and_swap_double(volatile AO_double_t * addr,
+ AO_t old_val1, AO_t old_val2,
+ AO_t new_val1, AO_t new_val2);
+
+int compare_and_swap_double(volatile AO_double_t * addr,
+ AO_t old_val1,
+ AO_t new_val1, AO_t new_val2);
+
+where AO_double_t is a structure containing AO_val1 and AO_val2 fields,
+both of type AO_t. For compare_and_swap_double, we compare against
+the val1 field. AO_double_t exists only if AO_HAVE_double_t
+is defined.
+
+ORDERING CONSTRAINTS:
+
+Each operation name also includes a suffix that specifies the associated
+ordering semantics. The ordering constraint limits reordering of this
+operation with respect to other atomic operations and ordinary memory
+references. The current implementation assumes that all memory references
+are to ordinary cacheable memory; the ordering guarantee is with respect
+to other threads or processes, not I/O devices. (Whether or not this
+distinction is important is platform-dependent.)
+
+Ordering suffixes are one of the following:
+
+<none>: No memory barrier. A plain AO_nop() really does nothing.
+_release: Earlier operations must become visible to other threads
+ before the atomic operation.
+_acquire: Later operations must become visible after this operation.
+_read: Subsequent reads must become visible after reads included in
+ the atomic operation or preceding it. Rarely useful for clients?
+_write: Earlier writes become visible before writes during or after
+ the atomic operation. Rarely useful for clients?
+_full: Ordered with respect to both earlier and later memory ops.
+ AO_store_full or AO_nop_full are the normal ways to force a store
+ to be ordered with respect to a later load.
+_release_write: Ordered with respect to earlier writes. This is
+ normally implemented as either a _write or _release
+ barrier.
+_acquire_read: Ordered with respect to later reads. This is
+ normally implemented as either a _read or _acquire barrier.
+_dd_acquire_read: Ordered with respect to later reads that are data
+ dependent on this one. This is needed on
+ a pointer read, which is later dereferenced to read a
+ second value, with the expectation that the second
+ read is ordered after the first one. On most architectures,
+ this is equivalent to no barrier. (This is very
+ hard to define precisely. It should probably be avoided.
+ A major problem is that optimizers tend to try to
+ eliminate dependencies from the generated code, since
+ dependencies force the hardware to execute the code
+ serially.)
+
+We assume that if a store is data-dependent on a previous load, then
+the two are always implicitly ordered.
+
+It is possible to test whether AO_<op><barrier> is available on the
+current platform by checking whether AO_HAVE_<op>_<barrier> is defined
+as a macro.
+
+Note that we generally don't implement operations that are either
+meaningless (e.g. AO_nop_acquire, AO_nop_release) or which appear to
+have no clear use (e.g. AO_load_release, AO_store_acquire, AO_load_write,
+AO_store_read). On some platforms (e.g. PA-RISC) many operations
+will remain undefined unless AO_REQUIRE_CAS is defined before including
+the package.
+
+When typed in the package build directory, the following command
+will print operations that are unimplemented on the platform:
+
+make test_atomic; ./test_atomic
+
+The following command generates a file "list_atomic.i" containing the
+macro expansions of all implemented operations on the platform:
+
+make list_atomic.i
+
+Future directions:
+
+It currently appears that something roughly analogous to this is very likely
+to become part of the C++0x standard. That effort has pointed out a number
+of issues that we expect to address there. Since some of the solutions
+really require compiler support, they may not be completely addressed here.
+
+Known issues include:
+
+We should be more precise in defining the semantics of the ordering
+constraints, and if and how we can guarantee sequential consistency.
+
+Dd_acquire_read is very hard or impossible to define in a way that cannot
+be invalidated by reasonably standard compiler transformations.
+
+There is probably no good reason to provide operations on standard
+integer types, since those may have the wrong alignment constraints.
+
+
+Example:
+
+If you want to initialize an object, and then "publish" a pointer to it
+in a global location p, such that other threads reading the new value of
+p are guaranteed to see an initialized object, it suffices to use
+AO_release_write(p, ...) to write the pointer to the object, and to
+retrieve it in other threads with AO_acquire_read(p).
+
+Platform notes:
+
+All X86: We quietly assume 486 or better.
+
+Microsoft compilers:
+Define AO_ASSUME_WINDOWS98 to get access to hardware compare-and-swap
+functionality. This relies on the InterlockedCompareExchange() function
+which was apparently not supported in Windows95. (There may be a better
+way to get access to this.)
+
+Gcc on x86:
+Define AO_USE_PENTIUM4_INSTRS to use the Pentium 4 mfence instruction.
+Currently this appears to be of marginal benefit.
--- /dev/null
+The libatomic_ops_gpl includes a simple almost-lock-free malloc implementation.
+
+This is intended as a safe way to allocate memory from a signal handler,
+or to allocate memory in the context of a library that does not know what
+thread library it will be used with. In either case locking is impossible.
+
+Note that the operations are only guaranteed to be 1-lock-free, i.e. a
+single blocked thread will not prevent progress, but multiple blocked
+threads may. To safely use these operations in a signal handler,
+the handler should be non-reentrant, i.e. it should not be interruptible
+by another handler using these operations. Furthermore use outside
+of signal handlers in a multithreaded application should be protected
+by a lock, so that at most one invocation may be interrupted by a signal.
+The header will define the macro "AO_MALLOC_IS_LOCK_FREE" on platforms
+on which malloc is completely lock-free, and hence these restrictions
+do not apply.
+
+In the presence of threads, but absence of contention, the time performance
+of this package should be as good, or slightly better than, most system
+malloc implementations. Its space performance
+is theoretically optimal (to within a constant factor), but probably
+quite poor in practice. In particular, no attempt is made to
+coalesce free small memory blocks. Something like Doug Lea's malloc is
+likely to use significantly less memory for complex applications.
+
+Performance on platforms without an efficient compare-and-swap implementation
+will be poor.
+
+This package was not designed for processor-scalability in the face of
+high allocation rates. If all threads happen to allocate different-sized
+objects, you might get lucky. Otherwise expect contention and false-sharing
+problems. If this is an issue, something like Maged Michael's algorithm
+(PLDI 2004) would be technically a far better choice. If you are concerned
+only with scalability, and not signal-safety, you might also consider
+using Hoard instead. We have seen a factor of 3 to 4 slowdown from the
+standard glibc malloc implementation with contention, even when the
+performance without contention was faster. (To make the implementation
+more scalable, one would need to replicate at least the free list headers,
+so that concurrent access is possible without cache conflicts.)
+
+Unfortunately there is no portable async-signal-safe way to obtain large
+chunks of memory from the OS. Based on reading of the source code,
+mmap-based allocation appears safe under Linux, and probably BSD variants.
+It is probably unsafe for operating systems built on Mach, such as
+Apple's Darwin. Without use of mmap, the allocator is
+limited to a fixed size, statically preallocated heap (2MB by default),
+and will fail to allocate objects above a certain size (just under 64K
+by default). Use of mmap to circumvent these limitations requires an
+explicit call.
+
+The entire interface to the AO_malloc package currently consists of:
+
+#include <atomic_ops_malloc.h> /* includes atomic_ops.h */
+
+void *AO_malloc(size_t sz);
+void AO_free(void *p);
+void AO_malloc_enable_mmap(void);
--- /dev/null
+Note that the AO_stack implementation is licensed under the GPL,
+unlike the lower level routines.
+
+The header file atomic_ops_stack.h defines a linked stack abstraction.
+Stacks may be accessed by multiple concurrent threads. The implementation
+is 1-lock-free, i.e. it will continue to make progress if at most one
+thread becomes inactive while operating on the data structure.
+
+(The implementation can be built to be N-lock-free for any given N. But that
+seems to rarely be useful, especially since larger N involve some slowdown.)
+
+This makes it safe to access these data structures from non-reentrant
+signal handlers, provided at most one non-signal-handler thread is
+accessing the data structure at once. This latter condition can be
+ensured by acquiring an ordinary lock around the non-handler accesses
+to the data structure.
+
+For details see:
+
+Hans-J. Boehm, "An Almost Non-Blocking Stack", PODC 2004,
+http://portal.acm.org/citation.cfm?doid=1011767.1011774
+(This is not exactly the implementation described there, since the
+interface was cleaned up in the interim. But it should perform
+very similarly.)
+
+We use a fully lock-free implementation when the underlying hardware
+makes that less expensive, i.e. when we have a double-wide compare-and-swap
+operation available. (The fully lock-free implementation uses an AO_t-
+sized version count, and assumes it does not wrap during the time any
+given operation is active. This seems reasonably safe on 32-bit hardware,
+and very safe on 64-bit hardware.) If a fully lock-free implementation
+is used, the macro AO_STACK_IS_LOCK_FREE will be defined.
+
+The implementation is interesting only because it allows reuse of
+existing nodes. This is necessary, for example, to implement a memory
+allocator.
+
+Since we want to leave the precise stack node type up to the client,
+we insist only that each stack node contains a link field of type AO_t.
+When a new node is pushed on the stack, the push operation expects to be
+passed the pointer to this link field, which will then be overwritten by
+this link field. Similarly, the pop operation returns a pointer to the
+link field of the object that previously was on the top of the stack.
+
+The cleanest way to use these routines is probably to define the stack node
+type with an initial AO_t link field, so that the conversion between the
+link-field pointer and the stack element pointer is just a compile-time
+cast. But other possibilities exist. (This would be cleaner in C++ with
+templates.)
+
+A stack is represented by an AO_stack_t structure. (This is normally
+2 or 3 times the size of a pointer.) It may be statically initialized
+by setting it to AO_STACK_INITIALIZER, or dynamically initialized to
+an empty stack with AO_stack_init. There are only three operations for
+accessing stacks:
+
+void AO_stack_init(AO_stack_t *list);
+void AO_stack_push_release(AO_stack_t *list, AO_t *new_element);
+AO_t * AO_stack_pop_acquire(volatile AO_stack_t *list);
+
+We require that the objects pushed as list elements remain addressable
+as long as any push or pop operations are in progress. (It is OK for an object
+to be "pop"ped off a stack and "deallocated" with a concurrent "pop" on
+the same stack still in progress, but only if "deallocation" leaves the
+object addressable. The second "pop" may still read the object, but
+the value it reads will not matter.)
+
+We require that the headers (AO_stack objects) remain allocated and
+valid as long as any operations on them are still in-flight.
+
+We also provide macros AO_REAL_HEAD_PTR that converts an AO_stack_t
+to a pointer to the link field in the next element, and AO_REAL_NEXT_PTR
+that converts a link field to a real, dereferenceable pointer to the link field
+in the next element. This is intended only for debugging, or to traverse
+the list after modification has ceased. There is otherwise no guarantee that
+walking a stack using this macro will produce any kind of consistent
+picture of the data structure.
--- /dev/null
+Most of the atomic_ops functionality is available under Win32 with
+the Microsoft tools, but the build process currently is considerably more
+primitive than on Linux/Unix platforms.
+
+To build:
+
+1) Go to the src directory in the distribution.
+2) Make sure the Microsoft command-line tools (e.g. nmake) are available.
+3) Run "nmake -f Makefile.msft". This should run some tests, which
+may print warnings about the types of the "Interlocked" functions.
+I haven't been able to make all versions of VC++ happy. If you know
+how to, please send a patch.
+4) To compile applications, you will need to retain or copy the following
+pieces from the resulting src directory contents:
+ "atomic_ops.h" - Header file defining low-level primitives. This
+ includes files from:
+ "atomic_ops"- Subdirectory containing implementation header files.
+ "atomic_ops_stack.h" - Header file describing almost lock-free stack.
+ "atomic_ops_malloc.h" - Header file describing almost lock-free malloc.
+ "libatomic_ops_gpl.lib" - Library containing implementation of the
+ above two (plus AO_pause() defined in atomic_ops.c).
+ The atomic_ops.h implementation is entirely in the
+ header files in Win32.
+
+Most clients of atomic_ops.h will need to define AO_ASSUME_WINDOWS98 before
+including it. Compare_and_swap is otherwise not available.
+Defining AO_ASSUME_VISTA will make compare_double_and_swap_double available
+as well.
+
+Note that the library is covered by the GNU General Public License, while
+the top 2 of these pieces allow use in proprietary code.
--- /dev/null
+# Place holder to keep this directory in the Git repository.
+*
+!.gitignore
--- /dev/null
+# pkg-config metadata for using libatomic_ops *uninstalled* (in-tree):
+# paths point into the build and source trees rather than an install prefix.
+# The @...@ tokens are substituted by configure.
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+top_builddir=@abs_top_builddir@
+top_srcdir=@abs_top_srcdir@
+
+Name: The atomic_ops library (uninstalled)
+Description: Atomic memory update operations
+Version: @PACKAGE_VERSION@
+Libs: ${top_builddir}/src/libatomic_ops.la
+Cflags: -I${top_builddir}/src -I${top_srcdir}/src
--- /dev/null
+# pkg-config metadata for the installed libatomic_ops library.
+# The @...@ tokens are substituted by configure.
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: The atomic_ops library
+Description: Atomic memory update operations portable implementation
+Version: @PACKAGE_VERSION@
+Libs: -L${libdir} -latomic_ops
+Cflags: -I${includedir}
--- /dev/null
+# Compiler flags for all objects built here; PICFLAG is substituted
+# by configure.
+AM_CFLAGS=@PICFLAG@
+AM_CPPFLAGS = -I$(top_builddir)/src -I$(top_srcdir)/src
+
+# Public headers installed into $(includedir).
+include_HEADERS = atomic_ops.h atomic_ops_stack.h atomic_ops_malloc.h
+# Two libraries: the core, and the stack/malloc part (GPL-licensed per
+# the README notes elsewhere in this tree).
+lib_LTLIBRARIES = libatomic_ops.la libatomic_ops_gpl.la
+# Some targets need a hand-written assembly file in addition to
+# atomic_ops.c; NEED_ASM is decided by configure.
+if NEED_ASM
+libatomic_ops_la_SOURCES = atomic_ops.c atomic_ops_sysdeps.S
+else
+libatomic_ops_la_SOURCES = atomic_ops.c
+endif
+libatomic_ops_la_LDFLAGS = -version-info 1:3:0 -no-undefined
+
+libatomic_ops_gpl_la_SOURCES = atomic_ops_stack.c atomic_ops_malloc.c
+libatomic_ops_gpl_la_LDFLAGS = -version-info 1:3:0 -no-undefined
+libatomic_ops_gpl_la_LIBADD = libatomic_ops.la
+
+EXTRA_DIST = Makefile.msft atomic_ops/sysdeps/README \
+ atomic_ops/generalize-arithm.template \
+ atomic_ops/generalize-small.template \
+ atomic_ops/sysdeps/ao_t_is_int.template \
+ atomic_ops/sysdeps/gcc/generic-arithm.template \
+ atomic_ops/sysdeps/gcc/generic-small.template \
+ atomic_ops/sysdeps/loadstore/acquire_release_volatile.template \
+ atomic_ops/sysdeps/loadstore/atomic_load.template \
+ atomic_ops/sysdeps/loadstore/atomic_store.template \
+ atomic_ops/sysdeps/loadstore/ordered_loads_only.template \
+ atomic_ops/sysdeps/loadstore/ordered_stores_only.template \
+ atomic_ops/sysdeps/sunc/sparc.S
+
+BUILT_SOURCES = atomic_ops/generalize-arithm.h \
+ atomic_ops/generalize-small.h \
+ atomic_ops/sysdeps/ao_t_is_int.h \
+ atomic_ops/sysdeps/gcc/generic-arithm.h \
+ atomic_ops/sysdeps/gcc/generic-small.h \
+ atomic_ops/sysdeps/loadstore/acquire_release_volatile.h \
+ atomic_ops/sysdeps/loadstore/atomic_load.h \
+ atomic_ops/sysdeps/loadstore/atomic_store.h \
+ atomic_ops/sysdeps/loadstore/char_acquire_release_volatile.h \
+ atomic_ops/sysdeps/loadstore/char_atomic_load.h \
+ atomic_ops/sysdeps/loadstore/char_atomic_store.h \
+ atomic_ops/sysdeps/loadstore/int_acquire_release_volatile.h \
+ atomic_ops/sysdeps/loadstore/int_atomic_load.h \
+ atomic_ops/sysdeps/loadstore/int_atomic_store.h \
+ atomic_ops/sysdeps/loadstore/ordered_loads_only.h \
+ atomic_ops/sysdeps/loadstore/ordered_stores_only.h \
+ atomic_ops/sysdeps/loadstore/short_acquire_release_volatile.h \
+ atomic_ops/sysdeps/loadstore/short_atomic_load.h \
+ atomic_ops/sysdeps/loadstore/short_atomic_store.h
+
+#Private Headers
+privatedir=${includedir}/
+nobase_private_HEADERS = atomic_ops/ao_version.h \
+ atomic_ops/generalize.h \
+ $(BUILT_SOURCES) \
+ \
+ atomic_ops/sysdeps/all_acquire_release_volatile.h \
+ atomic_ops/sysdeps/all_aligned_atomic_load_store.h \
+ atomic_ops/sysdeps/all_atomic_load_store.h \
+ atomic_ops/sysdeps/all_atomic_only_load.h \
+ atomic_ops/sysdeps/emul_cas.h \
+ atomic_ops/sysdeps/generic_pthread.h \
+ atomic_ops/sysdeps/ordered.h \
+ atomic_ops/sysdeps/ordered_except_wr.h \
+ atomic_ops/sysdeps/read_ordered.h \
+ atomic_ops/sysdeps/standard_ao_double_t.h \
+ atomic_ops/sysdeps/test_and_set_t_is_ao_t.h \
+ atomic_ops/sysdeps/test_and_set_t_is_char.h \
+ \
+ atomic_ops/sysdeps/armcc/arm_v6.h \
+ \
+ atomic_ops/sysdeps/gcc/aarch64.h \
+ atomic_ops/sysdeps/gcc/alpha.h \
+ atomic_ops/sysdeps/gcc/arm.h \
+ atomic_ops/sysdeps/gcc/avr32.h \
+ atomic_ops/sysdeps/gcc/cris.h \
+ atomic_ops/sysdeps/gcc/generic.h \
+ atomic_ops/sysdeps/gcc/hexagon.h \
+ atomic_ops/sysdeps/gcc/hppa.h \
+ atomic_ops/sysdeps/gcc/ia64.h \
+ atomic_ops/sysdeps/gcc/m68k.h \
+ atomic_ops/sysdeps/gcc/mips.h \
+ atomic_ops/sysdeps/gcc/powerpc.h \
+ atomic_ops/sysdeps/gcc/s390.h \
+ atomic_ops/sysdeps/gcc/sh.h \
+ atomic_ops/sysdeps/gcc/sparc.h \
+ atomic_ops/sysdeps/gcc/x86.h \
+ \
+ atomic_ops/sysdeps/hpc/hppa.h \
+ atomic_ops/sysdeps/hpc/ia64.h \
+ \
+ atomic_ops/sysdeps/ibmc/powerpc.h \
+ \
+ atomic_ops/sysdeps/icc/ia64.h \
+ \
+ atomic_ops/sysdeps/loadstore/double_atomic_load_store.h \
+ \
+ atomic_ops/sysdeps/msftc/arm.h \
+ atomic_ops/sysdeps/msftc/common32_defs.h \
+ atomic_ops/sysdeps/msftc/x86.h \
+ atomic_ops/sysdeps/msftc/x86_64.h \
+ \
+ atomic_ops/sysdeps/sunc/sparc.h \
+ atomic_ops/sysdeps/sunc/x86.h
+
+atomic_ops/generalize-small.h: atomic_ops/generalize-small.template
+ mkdir -p `dirname $@`
+ sed -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g $? > $@
+ sed -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g $? >> $@
+ sed -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g $? >> $@
+ sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g $? >> $@
+ sed -e s:XSIZE:double:g -e s:XCTYPE:AO_double_t:g $? >> $@
+
+atomic_ops/generalize-arithm.h: atomic_ops/generalize-arithm.template
+ mkdir -p `dirname $@`
+ sed -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g $? > $@
+ sed -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g $? >> $@
+ sed -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g $? >> $@
+ sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g $? >> $@
+
+atomic_ops/sysdeps/ao_t_is_int.h: atomic_ops/sysdeps/ao_t_is_int.template
+ mkdir -p `dirname $@`
+ sed -e s:_XBAR::g $? > $@
+ sed -e s:XBAR:full:g $? >> $@
+ sed -e s:XBAR:acquire:g $? >> $@
+ sed -e s:XBAR:release:g $? >> $@
+ sed -e s:XBAR:write:g $? >> $@
+ sed -e s:XBAR:read:g $? >> $@
+
+atomic_ops/sysdeps/gcc/generic-arithm.h: \
+ atomic_ops/sysdeps/gcc/generic-arithm.template
+ mkdir -p `dirname $@`
+ sed -e s:_XBAR::g -e s:XGCCBAR:RELAXED:g \
+ -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g $? > $@
+ sed -e s:_XBAR::g -e s:XGCCBAR:RELAXED:g \
+ -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g $? >> $@
+ sed -e s:_XBAR::g -e s:XGCCBAR:RELAXED:g \
+ -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g $? >> $@
+ sed -e s:_XBAR::g -e s:XGCCBAR:RELAXED:g \
+ -e s:XSIZE_::g -e s:XCTYPE:AO_t:g $? >> $@
+ sed -e s:XBAR:acquire:g -e s:XGCCBAR:ACQUIRE:g \
+ -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g $? >> $@
+ sed -e s:XBAR:acquire:g -e s:XGCCBAR:ACQUIRE:g \
+ -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g $? >> $@
+ sed -e s:XBAR:acquire:g -e s:XGCCBAR:ACQUIRE:g \
+ -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g $? >> $@
+ sed -e s:XBAR:acquire:g -e s:XGCCBAR:ACQUIRE:g \
+ -e s:XSIZE_::g -e s:XCTYPE:AO_t:g $? >> $@
+ sed -e s:XBAR:release:g -e s:XGCCBAR:RELEASE:g \
+ -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g $? >> $@
+ sed -e s:XBAR:release:g -e s:XGCCBAR:RELEASE:g \
+ -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g $? >> $@
+ sed -e s:XBAR:release:g -e s:XGCCBAR:RELEASE:g \
+ -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g $? >> $@
+ sed -e s:XBAR:release:g -e s:XGCCBAR:RELEASE:g \
+ -e s:XSIZE_::g -e s:XCTYPE:AO_t:g $? >> $@
+ sed -e s:XBAR:full:g -e s:XGCCBAR:SEQ_CST:g \
+ -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g $? >> $@
+ sed -e s:XBAR:full:g -e s:XGCCBAR:SEQ_CST:g \
+ -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g $? >> $@
+ sed -e s:XBAR:full:g -e s:XGCCBAR:SEQ_CST:g \
+ -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g $? >> $@
+ sed -e s:XBAR:full:g -e s:XGCCBAR:SEQ_CST:g \
+ -e s:XSIZE_::g -e s:XCTYPE:AO_t:g $? >> $@
+
+atomic_ops/sysdeps/gcc/generic-small.h: \
+ atomic_ops/sysdeps/gcc/generic-small.template
+ mkdir -p `dirname $@`
+ sed -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g $? > $@
+ sed -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g $? >> $@
+ sed -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g $? >> $@
+ sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g $? >> $@
+
+atomic_ops/sysdeps/loadstore/ordered_loads_only.h: \
+ atomic_ops/sysdeps/loadstore/ordered_loads_only.template
+ mkdir -p `dirname $@`
+ sed -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g $? > $@
+ sed -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g $? >> $@
+ sed -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g $? >> $@
+ sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g $? >> $@
+ sed -e s:XSIZE:double:g -e s:XCTYPE:AO_double_t:g $? >> $@
+
+atomic_ops/sysdeps/loadstore/ordered_stores_only.h: \
+ atomic_ops/sysdeps/loadstore/ordered_stores_only.template
+ mkdir -p `dirname $@`
+ sed -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g $? > $@
+ sed -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g $? >> $@
+ sed -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g $? >> $@
+ sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g $? >> $@
+ sed -e s:XSIZE:double:g -e s:XCTYPE:AO_double_t:g $? >> $@
+
+atomic_ops/sysdeps/loadstore/acquire_release_volatile.h: \
+ atomic_ops/sysdeps/loadstore/acquire_release_volatile.template
+ mkdir -p `dirname $@`
+ sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g $? > $@
+
+atomic_ops/sysdeps/loadstore/char_acquire_release_volatile.h: \
+ atomic_ops/sysdeps/loadstore/acquire_release_volatile.template
+ mkdir -p `dirname $@`
+ sed -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g $? > $@
+
+atomic_ops/sysdeps/loadstore/int_acquire_release_volatile.h: \
+ atomic_ops/sysdeps/loadstore/acquire_release_volatile.template
+ mkdir -p `dirname $@`
+ sed -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g $? > $@
+
+atomic_ops/sysdeps/loadstore/short_acquire_release_volatile.h: \
+ atomic_ops/sysdeps/loadstore/acquire_release_volatile.template
+ mkdir -p `dirname $@`
+ sed -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g $? > $@
+
+atomic_ops/sysdeps/loadstore/atomic_load.h: \
+ atomic_ops/sysdeps/loadstore/atomic_load.template
+ mkdir -p `dirname $@`
+ sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g $? > $@
+
+atomic_ops/sysdeps/loadstore/char_atomic_load.h: \
+ atomic_ops/sysdeps/loadstore/atomic_load.template
+ mkdir -p `dirname $@`
+ sed -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g $? > $@
+
+atomic_ops/sysdeps/loadstore/int_atomic_load.h: \
+ atomic_ops/sysdeps/loadstore/atomic_load.template
+ mkdir -p `dirname $@`
+ sed -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g $? > $@
+
+atomic_ops/sysdeps/loadstore/short_atomic_load.h: \
+ atomic_ops/sysdeps/loadstore/atomic_load.template
+ mkdir -p `dirname $@`
+ sed -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g $? > $@
+
+atomic_ops/sysdeps/loadstore/atomic_store.h: \
+ atomic_ops/sysdeps/loadstore/atomic_store.template
+ mkdir -p `dirname $@`
+ sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g $? > $@
+
+atomic_ops/sysdeps/loadstore/char_atomic_store.h: \
+ atomic_ops/sysdeps/loadstore/atomic_store.template
+ mkdir -p `dirname $@`
+ sed -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g $? > $@
+
+atomic_ops/sysdeps/loadstore/int_atomic_store.h: \
+ atomic_ops/sysdeps/loadstore/atomic_store.template
+ mkdir -p `dirname $@`
+ sed -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g $? > $@
+
+atomic_ops/sysdeps/loadstore/short_atomic_store.h: \
+ atomic_ops/sysdeps/loadstore/atomic_store.template
+ mkdir -p `dirname $@`
+ sed -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g $? > $@
--- /dev/null
+#
+# Copyright (c) 2003-2005 Hewlett-Packard Development Company, L.P.
+#
+# The really trivial win32/VC++ Makefile. Note that atomic_ops.c defines
+# only AO_pause (used by atomic_ops_stack).
+# And we rely on a pre-built test_atomic_include.h and generalize-small.h,
+# since we can't rely on sed. But we don't keep test_atomic_include.h in
+# the development repository any longer, so if you want to do "make check"
+# for the sources obtained from the repository then
+# do "autoreconf -vif; ./configure; make check" in Cygwin first.
+# Win32 clients only need to include the header files.
+# To install, copy atomic_ops.h and the atomic_ops/... tree to your favorite
+# include directory.
+
+#MY_CPU=X86
+#CPU=$(MY_CPU)
+#!include <ntwin32.mak>
+
+CFLAGS=-O2 -W3 -DAO_ASSUME_WINDOWS98
+
+LIB_OBJS=atomic_ops.obj atomic_ops_malloc.obj atomic_ops_stack.obj
+
+all: libatomic_ops_gpl.lib
+
+atomic_ops.obj:
+ cl $(CFLAGS) -c atomic_ops.c
+
+atomic_ops_stack.obj:
+ cl $(CFLAGS) -c atomic_ops_stack.c
+
+atomic_ops_malloc.obj:
+ cl $(CFLAGS) -c atomic_ops_malloc.c
+
+libatomic_ops_gpl.lib: $(LIB_OBJS)
+ lib /out:libatomic_ops_gpl.lib $(LIB_OBJS)
+
+test_atomic: ..\tests\test_atomic.c ..\tests\test_atomic_include.h
+ cl $(CFLAGS) -I. ..\tests\test_atomic.c /Fo.\test_atomic
+
+test_atomic_w95: ..\tests\test_atomic.c ..\tests\test_atomic_include.h
+ cl -W3 -O2 -I. ..\tests\test_atomic.c /Fo.\test_atomic_w95
+
+test_malloc: ..\tests\test_malloc.c libatomic_ops_gpl.lib
+ cl $(CFLAGS) -I. ..\tests\test_malloc.c /Fo.\test_malloc \
+ libatomic_ops_gpl.lib
+
+test_stack: ..\tests\test_stack.c libatomic_ops_gpl.lib
+ cl $(CFLAGS) -I. ..\tests\test_stack.c /Fo.\test_stack \
+ libatomic_ops_gpl.lib
+
+check: test_atomic test_atomic_w95 test_malloc test_stack
+ @echo "The following will print lots of 'Missing ...' messages"
+ test_atomic_w95
+ @echo "The following will print some 'Missing ...' messages"
+ test_atomic
+ test_malloc
+ test_stack
+
+clean:
+ del *.exe *.obj libatomic_ops_gpl.lib
--- /dev/null
+/*
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * Initialized data and out-of-line functions to support atomic_ops.h
+ * go here. Currently this is needed only for pthread-based atomics
+ * emulation, or for compare-and-swap emulation.
+ * Pthreads emulation isn't useful on a native Windows platform, and
+ * cas emulation is not needed. Thus we skip this on Windows.
+ */
+
+#if defined(HAVE_CONFIG_H)
+# include "config.h"
+#endif
+
+#if defined(__native_client__) && !defined(AO_USE_NO_SIGNALS) \
+ && !defined(AO_USE_NANOSLEEP)
+ /* Since NaCl is not recognized by configure yet, we do it here. */
+# define AO_USE_NO_SIGNALS
+# define AO_USE_NANOSLEEP
+#endif
+
+#if defined(AO_USE_WIN32_PTHREADS) && !defined(AO_USE_NO_SIGNALS)
+# define AO_USE_NO_SIGNALS
+#endif
+
+#undef AO_REQUIRE_CAS
+#include "atomic_ops.h" /* Without cas emulation! */
+
+#if !defined(_MSC_VER) && !defined(__MINGW32__) && !defined(__BORLANDC__) \
+ || defined(AO_USE_NO_SIGNALS)
+
+#ifndef AO_NO_PTHREADS
+# include <pthread.h>
+#endif
+
+#ifndef AO_USE_NO_SIGNALS
+# include <signal.h>
+#endif
+
+#ifdef AO_USE_NANOSLEEP
+ /* This requires _POSIX_TIMERS feature. */
+# include <sys/time.h>
+# include <time.h>
+#elif defined(AO_USE_WIN32_PTHREADS)
+# include <windows.h> /* for Sleep() */
+#elif defined(_HPUX_SOURCE)
+# include <sys/time.h>
+#else
+# include <sys/select.h>
+#endif
+
+#ifndef AO_HAVE_double_t
+# include "atomic_ops/sysdeps/standard_ao_double_t.h"
+#endif
+
+/* Lock for pthreads-based implementation. */
+#ifndef AO_NO_PTHREADS
+ pthread_mutex_t AO_pt_lock = PTHREAD_MUTEX_INITIALIZER;
+#endif
+
+/*
+ * Out of line compare-and-swap emulation based on test and set.
+ *
+ * We use a small table of locks for different compare_and_swap locations.
+ * Before we perform a compare-and-swap, we grab the corresponding
+ * lock. Different locations may hash to the same lock, but since we
+ * never acquire more than one lock at a time, this can't deadlock.
+ * We explicitly disable signals while we perform this operation.
+ *
+ * TODO: Probably also support emulation based on Lamport
+ * locks, since we may not have test_and_set either.
+ */
+/* Number of distinct locks in the emulation lock table.  Must be a    */
+/* power of two so AO_HASH can mask instead of dividing.               */
+#define AO_HASH_SIZE 16
+
+/* Map an address to a lock index.  The low 12 bits are discarded, so  */
+/* all locations within the same 4096-byte-aligned region share a lock. */
+#define AO_HASH(x) (((unsigned long)(x) >> 12) & (AO_HASH_SIZE-1))
+
+/* Table of test-and-set locks protecting the emulated operations.     */
+/* Distinct addresses may hash to the same lock, but since at most one */
+/* lock is held at a time, this cannot deadlock.                       */
+AO_TS_t AO_locks[AO_HASH_SIZE] = {
+  AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER,
+  AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER,
+  AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER,
+  AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER,
+};
+
+void AO_pause(int); /* defined below */
+
+/* Slow path of lock(): keep retrying the test-and-set until it        */
+/* succeeds, backing off a little longer after each failed attempt.    */
+static void lock_ool(volatile AO_TS_t *l)
+{
+  int n_attempts = 0;
+
+  for (;;)
+    {
+      if (AO_test_and_set_acquire(l) != AO_TS_SET)
+        break;
+      AO_pause(++n_attempts);
+    }
+}
+
+/* Acquire the given test-and-set lock.  The uncontended case is       */
+/* handled inline; contention is delegated to lock_ool().              */
+AO_INLINE void lock(volatile AO_TS_t *l)
+{
+  if (AO_EXPECT_FALSE(AO_TS_SET == AO_test_and_set_acquire(l)))
+    lock_ool(l);
+}
+
+/* Release a lock acquired by lock().  AO_CLEAR normally performs a    */
+/* release-ordered store of AO_TS_CLEAR.                               */
+AO_INLINE void unlock(volatile AO_TS_t *l)
+{
+  AO_CLEAR(l);
+}
+
+#ifndef AO_USE_NO_SIGNALS
+  /* all_sigs is a full signal set, filled in lazily (below) under     */
+  /* init_lock; 'initialized' flags completion of that one-time setup. */
+  static sigset_t all_sigs;
+  static volatile AO_t initialized = 0;
+  static volatile AO_TS_t init_lock = AO_TS_INITIALIZER;
+
+  /* Block all signals for the calling thread, saving the previous     */
+  /* mask into *old_sigs_ptr.  The caller restores it afterwards with  */
+  /* sigprocmask(SIG_SETMASK, ...).                                    */
+  AO_INLINE void block_all_signals(sigset_t *old_sigs_ptr)
+  {
+    if (AO_EXPECT_FALSE(!AO_load_acquire(&initialized)))
+      {
+        /* First use: initialize all_sigs exactly once, under the      */
+        /* dedicated init lock.                                        */
+        lock(&init_lock);
+        if (!initialized)
+          sigfillset(&all_sigs);
+        unlock(&init_lock);
+        AO_store_release(&initialized, 1);
+      }
+    sigprocmask(SIG_BLOCK, &all_sigs, old_sigs_ptr);
+    /* Neither sigprocmask nor pthread_sigmask is 100% */
+    /* guaranteed to work here. Sigprocmask is not */
+    /* guaranteed to be thread safe, and pthread_sigmask */
+    /* is not async-signal-safe. Under linuxthreads, */
+    /* sigprocmask may block some pthreads-internal */
+    /* signals. So long as we do that for short periods, */
+    /* we should be OK. */
+  }
+#endif /* !AO_USE_NO_SIGNALS */
+
+/* Emulate AO_fetch_compare_and_swap: if *addr equals old_val, store   */
+/* new_val; the value originally fetched from *addr is returned either */
+/* way.  Atomicity comes from the per-address lock; signals are        */
+/* blocked around the locked region so that a signal handler cannot    */
+/* interrupt the lock holder and self-deadlock.                        */
+AO_t AO_fetch_compare_and_swap_emulation(volatile AO_t *addr, AO_t old_val,
+                                         AO_t new_val)
+{
+  AO_TS_t *my_lock = AO_locks + AO_HASH(addr);
+  AO_t fetched_val;
+
+# ifndef AO_USE_NO_SIGNALS
+    sigset_t old_sigs;
+    block_all_signals(&old_sigs);
+# endif
+  lock(my_lock);
+  fetched_val = *addr;
+  if (fetched_val == old_val)
+    *addr = new_val;
+  unlock(my_lock);
+# ifndef AO_USE_NO_SIGNALS
+    /* Restore the signal mask saved by block_all_signals(). */
+    sigprocmask(SIG_SETMASK, &old_sigs, NULL);
+# endif
+  return fetched_val;
+}
+
+/* Emulate a double-width compare-and-swap: if both halves of *addr    */
+/* match (old_val1, old_val2), replace them with (new_val1, new_val2)  */
+/* and return nonzero; otherwise leave *addr unchanged and return 0.   */
+/* Atomicity is provided by the per-address lock, with signals blocked */
+/* while it is held.                                                   */
+int AO_compare_double_and_swap_double_emulation(volatile AO_double_t *addr,
+                                                AO_t old_val1, AO_t old_val2,
+                                                AO_t new_val1, AO_t new_val2)
+{
+  AO_TS_t *lock_ptr = AO_locks + AO_HASH(addr);
+  int swapped = 0;
+
+# ifndef AO_USE_NO_SIGNALS
+    sigset_t old_sigs;
+    block_all_signals(&old_sigs);
+# endif
+  lock(lock_ptr);
+  if (addr -> AO_val1 == old_val1 && addr -> AO_val2 == old_val2)
+    {
+      addr -> AO_val1 = new_val1;
+      addr -> AO_val2 = new_val2;
+      swapped = 1;
+    }
+  unlock(lock_ptr);
+# ifndef AO_USE_NO_SIGNALS
+    /* Restore the signal mask saved by block_all_signals(). */
+    sigprocmask(SIG_SETMASK, &old_sigs, NULL);
+# endif
+  return swapped;
+}
+
+/* Emulated store with full-barrier semantics: the store happens under */
+/* the same per-address lock as the CAS emulations above, so it is     */
+/* totally ordered with respect to them.                               */
+void AO_store_full_emulation(volatile AO_t *addr, AO_t val)
+{
+  AO_TS_t *lock_ptr = AO_locks + AO_HASH(addr);
+
+  lock(lock_ptr);
+  *addr = val;
+  unlock(lock_ptr);
+}
+
+#else /* Non-posix platform */
+
+# include <windows.h>
+
+# define AO_USE_WIN32_PTHREADS
+ /* define to use Sleep() */
+
+ extern int AO_non_posix_implementation_is_entirely_in_headers;
+
+#endif
+
+static AO_t spin_dummy = 1;
+
+/* Spin for 2**n units. */
+static void AO_spin(int n)
+{
+  AO_t junk = AO_load(&spin_dummy);
+  int count;
+
+  for (count = 2 << n; count > 0; count--)
+    junk += (junk - 1) << 2;
+  /* With spin_dummy initialized to 1, junk stays 1 throughout; the    */
+  /* store keeps the compiler from discarding the delay loop.          */
+  AO_store(&spin_dummy, junk);
+}
+
+/* Wait a bit, with the delay growing roughly exponentially in n.      */
+/* Small n busy-waits via AO_spin; larger n sleeps, using whichever    */
+/* mechanism the platform provides.  Used as the backoff in lock_ool.  */
+void AO_pause(int n)
+{
+  if (n < 12)
+    AO_spin(n);
+  else
+    {
+# ifdef AO_USE_NANOSLEEP
+      /* Sleep for 1<<(n-2) ns, capped at 100 ms for n > 28. */
+      struct timespec ts;
+      ts.tv_sec = 0;
+      ts.tv_nsec = (n > 28 ? 100000 * 1000 : 1 << (n - 2));
+      nanosleep(&ts, 0);
+# elif defined(AO_USE_WIN32_PTHREADS)
+      Sleep(n > 28 ? 100 : n < 22 ? 1 : 1 << (n - 22)); /* in millis */
+# else
+      /* Sleep for 1<<(n-12) us, capped at 100 ms for n > 28. */
+      struct timeval tv;
+      /* Short async-signal-safe sleep. */
+      tv.tv_sec = 0;
+      tv.tv_usec = n > 28 ? 100000 : 1 << (n - 12);
+      select(0, 0, 0, 0, &tv);
+# endif
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef AO_ATOMIC_OPS_H
+#define AO_ATOMIC_OPS_H
+
+#include "atomic_ops/ao_version.h"
+ /* Define version numbers here to allow */
+ /* test on build machines for cross-builds. */
+
+#include <assert.h>
+#include <stddef.h>
+
+/* We define various atomic operations on memory in a */
+/* machine-specific way. Unfortunately, this is complicated */
+/* by the fact that these may or may not be combined with */
+/* various memory barriers. Thus the actual operations we */
+/* define have the form AO_<atomic-op>_<barrier>, for all */
+/* plausible combinations of <atomic-op> and <barrier>. */
+/* This of course results in a mild combinatorial explosion. */
+/* To deal with it, we try to generate derived */
+/* definitions for as many of the combinations as we can, as */
+/* automatically as possible. */
+/* */
+/* Our assumption throughout is that the programmer will */
+/* specify the least demanding operation and memory barrier */
+/* that will guarantee correctness for the implementation. */
+/* Our job is to find the least expensive way to implement it */
+/* on the applicable hardware. In many cases that will */
+/* involve, for example, a stronger memory barrier, or a */
+/* combination of hardware primitives. */
+/* */
+/* Conventions: */
+/* "plain" atomic operations are not guaranteed to include */
+/* a barrier. The suffix in the name specifies the barrier */
+/* type. Suffixes are: */
+/* _release: Earlier operations may not be delayed past it. */
+/* _acquire: Later operations may not move ahead of it. */
+/* _read: Subsequent reads must follow this operation and */
+/* preceding reads. */
+/* _write: Earlier writes precede both this operation and */
+/* later writes. */
+/* _full: Ordered with respect to both earlier and later memory */
+/* operations. */
+/* _release_write: Ordered with respect to earlier writes. */
+/* _acquire_read: Ordered with respect to later reads. */
+/* */
+/* Currently we try to define the following atomic memory */
+/* operations, in combination with the above barriers: */
+/* AO_nop */
+/* AO_load */
+/* AO_store */
+/* AO_test_and_set (binary) */
+/* AO_fetch_and_add */
+/* AO_fetch_and_add1 */
+/* AO_fetch_and_sub1 */
+/* AO_and */
+/* AO_or */
+/* AO_xor */
+/* AO_compare_and_swap */
+/* AO_fetch_compare_and_swap */
+/* */
+/* Note that atomicity guarantees are valid only if both */
+/* readers and writers use AO_ operations to access the */
+/* shared value, while ordering constraints are intended to */
+/* apply to all memory operations. If a location can potentially */
+/* be accessed simultaneously from multiple threads, and one of */
+/* those accesses may be a write access, then all such */
+/* accesses to that location should be through AO_ primitives. */
+/* However if AO_ operations enforce sufficient ordering to */
+/* ensure that a location x cannot be accessed concurrently, */
+/* or can only be read concurrently, then x can be accessed */
+/* via ordinary references and assignments. */
+/* */
+/* AO_compare_and_swap takes an address and an expected old */
+/* value and a new value, and returns an int. Non-zero result */
+/* indicates that it succeeded. */
+/* AO_fetch_compare_and_swap takes an address and an expected */
+/* old value and a new value, and returns the real old value. */
+/* The operation succeeded if and only if the expected old */
+/* value matches the old value returned. */
+/* */
+/* Test_and_set takes an address, atomically replaces it by */
+/* AO_TS_SET, and returns the prior value. */
+/* An AO_TS_t location can be reset with the */
+/* AO_CLEAR macro, which normally uses AO_store_release. */
+/* AO_fetch_and_add takes an address and an AO_t increment */
+/* value. The AO_fetch_and_add1 and AO_fetch_and_sub1 variants */
+/* are provided, since they allow faster implementations on */
+/* some hardware. AO_and, AO_or, AO_xor do atomically and, or, */
+/* xor (respectively) an AO_t value into a memory location, */
+/* but do not provide access to the original. */
+/* */
+/* We expect this list to grow slowly over time. */
+/* */
+/* Note that AO_nop_full is a full memory barrier. */
+/* */
+/* Note that if some data is initialized with */
+/* data.x = ...; data.y = ...; ... */
+/* AO_store_release_write(&data_is_initialized, 1) */
+/* then data is guaranteed to be initialized after the test */
+/* if (AO_load_acquire_read(&data_is_initialized)) ... */
+/* succeeds. Furthermore, this should generate near-optimal */
+/* code on all common platforms. */
+/* */
+/* All operations operate on unsigned AO_t, which */
+/* is the natural word size, and usually unsigned long. */
+/* It is possible to check whether a particular operation op */
+/* is available on a particular platform by checking whether */
+/* AO_HAVE_op is defined. We make heavy use of these macros */
+/* internally. */
+
+/* The rest of this file basically has three sections: */
+/* */
+/* Some utility and default definitions. */
+/* */
+/* The architecture dependent section: */
+/* This defines atomic operations that have direct hardware */
+/* support on a particular platform, mostly by including the */
+/* appropriate compiler- and hardware-dependent file. */
+/* */
+/* The synthesis section: */
+/* This tries to define other atomic operations in terms of */
+/* those that are explicitly available on the platform. */
+/* This section is hardware independent. */
+/* We make no attempt to synthesize operations in ways that */
+/* effectively introduce locks, except for the debugging/demo */
+/* pthread-based implementation at the beginning. A more */
+/* realistic implementation that falls back to locks could be */
+/* added as a higher layer. But that would sacrifice */
+/* usability from signal handlers. */
+/* The synthesis section is implemented almost entirely in */
+/* atomic_ops/generalize.h. */
+
+/* Some common defaults. Overridden for some architectures. */
+#define AO_t size_t
+
+/* The test_and_set primitive returns an AO_TS_VAL_t value. */
+/* AO_TS_t is the type of an in-memory test-and-set location. */
+
+#define AO_TS_INITIALIZER (AO_t)AO_TS_CLEAR
+
+/* Platform-dependent stuff: */
+#if (defined(__GNUC__) || defined(_MSC_VER) || defined(__INTEL_COMPILER) \
+ || defined(__DMC__) || defined(__WATCOMC__)) && !defined(AO_NO_INLINE)
+# define AO_INLINE static __inline
+#elif defined(__sun) && !defined(AO_NO_INLINE)
+# define AO_INLINE static inline
+#else
+# define AO_INLINE static
+#endif
+
+#if __GNUC__ >= 3 && !defined(LINT2)
+# define AO_EXPECT_FALSE(expr) __builtin_expect(expr, 0)
+ /* Equivalent to (expr) but predict that usually (expr) == 0. */
+#else
+# define AO_EXPECT_FALSE(expr) (expr)
+#endif /* !__GNUC__ */
+
+#if defined(__GNUC__) && !defined(__INTEL_COMPILER)
+# define AO_compiler_barrier() __asm__ __volatile__("" : : : "memory")
+#elif defined(_MSC_VER) || defined(__DMC__) || defined(__BORLANDC__) \
+ || defined(__WATCOMC__)
+# if defined(_AMD64_) || defined(_M_X64) || _MSC_VER >= 1400
+# if defined(_WIN32_WCE)
+/* # include <cmnintrin.h> */
+# elif defined(_MSC_VER)
+# include <intrin.h>
+# endif
+# pragma intrinsic(_ReadWriteBarrier)
+# define AO_compiler_barrier() _ReadWriteBarrier()
+ /* We assume this does not generate a fence instruction. */
+ /* The documentation is a bit unclear. */
+# else
+# define AO_compiler_barrier() __asm { }
+ /* The preceding implementation may be preferable here too. */
+ /* But the documentation warns about VC++ 2003 and earlier. */
+# endif
+#elif defined(__INTEL_COMPILER)
+# define AO_compiler_barrier() __memory_barrier()
+ /* FIXME: Too strong? IA64-only? */
+#elif defined(_HPUX_SOURCE)
+# if defined(__ia64)
+# include <machine/sys/inline.h>
+# define AO_compiler_barrier() _Asm_sched_fence()
+# else
+ /* FIXME - We don't know how to do this. This is a guess. */
+ /* And probably a bad one. */
+ static volatile int AO_barrier_dummy;
+# define AO_compiler_barrier() (void)(AO_barrier_dummy = AO_barrier_dummy)
+# endif
+#else
+ /* We conjecture that the following usually gives us the right */
+ /* semantics or an error. */
+# define AO_compiler_barrier() asm("")
+#endif
+
+#if defined(AO_USE_PTHREAD_DEFS)
+# include "atomic_ops/sysdeps/generic_pthread.h"
+#endif /* AO_USE_PTHREAD_DEFS */
+
+#if (defined(__CC_ARM) || defined(__ARMCC__)) && !defined(__GNUC__) \
+ && !defined(AO_USE_PTHREAD_DEFS)
+# include "atomic_ops/sysdeps/armcc/arm_v6.h"
+# define AO_GENERALIZE_TWICE
+#endif
+
+#if defined(__GNUC__) && !defined(AO_USE_PTHREAD_DEFS) \
+ && !defined(__INTEL_COMPILER)
+# if defined(__i386__)
+ /* We don't define AO_USE_SYNC_CAS_BUILTIN for x86 here because */
+ /* it might require specifying additional options (like -march) */
+ /* or additional link libraries (if -march is not specified). */
+# include "atomic_ops/sysdeps/gcc/x86.h"
+# endif /* __i386__ */
+# if defined(__x86_64__)
+# if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2)) \
+ && !defined(AO_USE_SYNC_CAS_BUILTIN)
+ /* It is safe to use __sync CAS built-in on this architecture. */
+# define AO_USE_SYNC_CAS_BUILTIN
+# endif
+# include "atomic_ops/sysdeps/gcc/x86.h"
+# endif /* __x86_64__ */
+# if defined(__ia64__)
+# include "atomic_ops/sysdeps/gcc/ia64.h"
+# define AO_GENERALIZE_TWICE
+# endif /* __ia64__ */
+# if defined(__hppa__)
+# include "atomic_ops/sysdeps/gcc/hppa.h"
+# define AO_CAN_EMUL_CAS
+# endif /* __hppa__ */
+# if defined(__alpha__)
+# include "atomic_ops/sysdeps/gcc/alpha.h"
+# define AO_GENERALIZE_TWICE
+# endif /* __alpha__ */
+# if defined(__s390__)
+# include "atomic_ops/sysdeps/gcc/s390.h"
+# endif /* __s390__ */
+# if defined(__sparc__)
+# include "atomic_ops/sysdeps/gcc/sparc.h"
+# define AO_CAN_EMUL_CAS
+# endif /* __sparc__ */
+# if defined(__m68k__)
+# include "atomic_ops/sysdeps/gcc/m68k.h"
+# endif /* __m68k__ */
+# if defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) \
+ || defined(__powerpc64__) || defined(__ppc64__)
+# include "atomic_ops/sysdeps/gcc/powerpc.h"
+# endif /* __powerpc__ */
+# if defined(__aarch64__)
+# include "atomic_ops/sysdeps/gcc/aarch64.h"
+# define AO_CAN_EMUL_CAS
+# endif /* __aarch64__ */
+# if defined(__arm__)
+# include "atomic_ops/sysdeps/gcc/arm.h"
+# define AO_CAN_EMUL_CAS
+# endif /* __arm__ */
+# if defined(__cris__) || defined(CRIS)
+# include "atomic_ops/sysdeps/gcc/cris.h"
+# define AO_GENERALIZE_TWICE
+# endif
+# if defined(__mips__)
+# include "atomic_ops/sysdeps/gcc/mips.h"
+# endif /* __mips__ */
+# if defined(__sh__) || defined(SH4)
+# include "atomic_ops/sysdeps/gcc/sh.h"
+# define AO_CAN_EMUL_CAS
+# endif /* __sh__ */
+# if defined(__avr32__)
+# include "atomic_ops/sysdeps/gcc/avr32.h"
+# endif
+# if defined(__hexagon__)
+# include "atomic_ops/sysdeps/gcc/hexagon.h"
+# endif
+#endif /* __GNUC__ && !AO_USE_PTHREAD_DEFS */
+
+#if (defined(__IBMC__) || defined(__IBMCPP__)) && !defined(__GNUC__) \
+ && !defined(AO_USE_PTHREAD_DEFS)
+# if defined(__powerpc__) || defined(__powerpc) || defined(__ppc__) \
+ || defined(__PPC__) || defined(_M_PPC) || defined(_ARCH_PPC) \
+ || defined(_ARCH_PWR)
+# include "atomic_ops/sysdeps/ibmc/powerpc.h"
+# define AO_GENERALIZE_TWICE
+# endif
+#endif
+
+#if defined(__INTEL_COMPILER) && !defined(AO_USE_PTHREAD_DEFS)
+# if defined(__ia64__)
+# include "atomic_ops/sysdeps/icc/ia64.h"
+# define AO_GENERALIZE_TWICE
+# endif
+# if defined(__GNUC__)
+ /* Intel Compiler in GCC compatible mode */
+# if defined(__i386__)
+# include "atomic_ops/sysdeps/gcc/x86.h"
+# endif /* __i386__ */
+# if defined(__x86_64__)
+# if (__INTEL_COMPILER > 1110) && !defined(AO_USE_SYNC_CAS_BUILTIN)
+# define AO_USE_SYNC_CAS_BUILTIN
+# endif
+# include "atomic_ops/sysdeps/gcc/x86.h"
+# endif /* __x86_64__ */
+# endif
+#endif
+
+#if defined(_HPUX_SOURCE) && !defined(__GNUC__) && !defined(AO_USE_PTHREAD_DEFS)
+# if defined(__ia64)
+# include "atomic_ops/sysdeps/hpc/ia64.h"
+# define AO_GENERALIZE_TWICE
+# else
+# include "atomic_ops/sysdeps/hpc/hppa.h"
+# define AO_CAN_EMUL_CAS
+# endif
+#endif
+
+#if defined(_MSC_VER) || defined(__DMC__) || defined(__BORLANDC__) \
+ || (defined(__WATCOMC__) && defined(__NT__))
+# if defined(_AMD64_) || defined(_M_X64)
+# include "atomic_ops/sysdeps/msftc/x86_64.h"
+# elif defined(_M_IX86) || defined(x86)
+# include "atomic_ops/sysdeps/msftc/x86.h"
+# elif defined(_M_ARM) || defined(ARM) || defined(_ARM_)
+# include "atomic_ops/sysdeps/msftc/arm.h"
+# define AO_GENERALIZE_TWICE
+# endif
+#endif
+
+#if defined(__sun) && !defined(__GNUC__) && !defined(AO_USE_PTHREAD_DEFS)
+ /* Note: use -DAO_USE_PTHREAD_DEFS if Sun CC does not handle inline asm. */
+# if defined(__i386) || defined(__x86_64) || defined(__amd64)
+# include "atomic_ops/sysdeps/sunc/x86.h"
+# endif
+#endif
+
+#if !defined(__GNUC__) && (defined(sparc) || defined(__sparc)) \
+ && !defined(AO_USE_PTHREAD_DEFS)
+# include "atomic_ops/sysdeps/sunc/sparc.h"
+# define AO_CAN_EMUL_CAS
+#endif
+
+#if defined(AO_REQUIRE_CAS) && !defined(AO_HAVE_compare_and_swap) \
+ && !defined(AO_HAVE_fetch_compare_and_swap) \
+ && !defined(AO_HAVE_compare_and_swap_full) \
+ && !defined(AO_HAVE_fetch_compare_and_swap_full) \
+ && !defined(AO_HAVE_compare_and_swap_acquire) \
+ && !defined(AO_HAVE_fetch_compare_and_swap_acquire)
+# if defined(AO_CAN_EMUL_CAS)
+# include "atomic_ops/sysdeps/emul_cas.h"
+# else
+# error Cannot implement AO_compare_and_swap_full on this architecture.
+# endif
+#endif /* AO_REQUIRE_CAS && !AO_HAVE_compare_and_swap ... */
+
+/* The most common way to clear a test-and-set location */
+/* at the end of a critical section. */
+#if AO_AO_TS_T && !defined(AO_CLEAR)
+# define AO_CLEAR(addr) AO_store_release((AO_TS_t *)(addr), AO_TS_CLEAR)
+#endif
+#if AO_CHAR_TS_T && !defined(AO_CLEAR)
+# define AO_CLEAR(addr) AO_char_store_release((AO_TS_t *)(addr), AO_TS_CLEAR)
+#endif
+
+/* The generalization section. */
+#if !defined(AO_GENERALIZE_TWICE) && defined(AO_CAN_EMUL_CAS) \
+ && !defined(AO_HAVE_compare_and_swap_full) \
+ && !defined(AO_HAVE_fetch_compare_and_swap_full)
+# define AO_GENERALIZE_TWICE
+#endif
+
+/* Theoretically we should repeatedly include atomic_ops/generalize.h. */
+/* In fact, we observe that this converges after a small fixed number */
+/* of iterations, usually one. */
+#include "atomic_ops/generalize.h"
+
+#if !defined(AO_GENERALIZE_TWICE) \
+ && defined(AO_HAVE_compare_double_and_swap_double) \
+ && (!defined(AO_HAVE_double_load) || !defined(AO_HAVE_double_store))
+# define AO_GENERALIZE_TWICE
+#endif
+
+#ifdef AO_T_IS_INT
+ /* Included after the first generalization pass. */
+# include "atomic_ops/sysdeps/ao_t_is_int.h"
+# ifndef AO_GENERALIZE_TWICE
+ /* Always generalize again. */
+# define AO_GENERALIZE_TWICE
+# endif
+#endif /* AO_T_IS_INT */
+
+#ifdef AO_GENERALIZE_TWICE
+# include "atomic_ops/generalize.h"
+#endif
+
+/* For compatibility with version 0.4 and earlier */
+#define AO_TS_T AO_TS_t
+#define AO_T AO_t
+#define AO_TS_VAL AO_TS_VAL_t
+
+#endif /* !AO_ATOMIC_OPS_H */
--- /dev/null
+/*
+ * Copyright (c) 2003-2004 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef AO_ATOMIC_OPS_H
+# error This file should not be included directly.
+#endif
+
+/* The policy regarding version numbers: development code has odd */
+/* "minor" number (and "micro" part is 0); when development is finished */
+/* and a release is prepared, "minor" number is incremented (keeping */
+/* "micro" number still zero), whenever a defect is fixed a new release */
+/* is prepared incrementing "micro" part to odd value (the most stable */
+/* release has the biggest "micro" number). */
+
+/* The version here should match that in configure.ac and README. */
+#define AO_VERSION_MAJOR 7
+#define AO_VERSION_MINOR 5
+#define AO_VERSION_MICRO 0 /* 7.5.0 */
--- /dev/null
+/*
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* char_compare_and_swap (based on fetch_compare_and_swap) */
+#if defined(AO_HAVE_char_fetch_compare_and_swap_full) \
+ && !defined(AO_HAVE_char_compare_and_swap_full)
+ AO_INLINE int
+ AO_char_compare_and_swap_full(volatile unsigned/**/char *addr, unsigned/**/char old_val,
+ unsigned/**/char new_val)
+ {
+ return AO_char_fetch_compare_and_swap_full(addr, old_val, new_val)
+ == old_val;
+ }
+# define AO_HAVE_char_compare_and_swap_full
+#endif
+
+#if defined(AO_HAVE_char_fetch_compare_and_swap_acquire) \
+ && !defined(AO_HAVE_char_compare_and_swap_acquire)
+ AO_INLINE int
+ AO_char_compare_and_swap_acquire(volatile unsigned/**/char *addr, unsigned/**/char old_val,
+ unsigned/**/char new_val)
+ {
+ return AO_char_fetch_compare_and_swap_acquire(addr, old_val, new_val)
+ == old_val;
+ }
+# define AO_HAVE_char_compare_and_swap_acquire
+#endif
+
+#if defined(AO_HAVE_char_fetch_compare_and_swap_release) \
+ && !defined(AO_HAVE_char_compare_and_swap_release)
+ AO_INLINE int
+ AO_char_compare_and_swap_release(volatile unsigned/**/char *addr, unsigned/**/char old_val,
+ unsigned/**/char new_val)
+ {
+ return AO_char_fetch_compare_and_swap_release(addr, old_val, new_val)
+ == old_val;
+ }
+# define AO_HAVE_char_compare_and_swap_release
+#endif
+
+#if defined(AO_HAVE_char_fetch_compare_and_swap_write) \
+ && !defined(AO_HAVE_char_compare_and_swap_write)
+ AO_INLINE int
+ AO_char_compare_and_swap_write(volatile unsigned/**/char *addr, unsigned/**/char old_val,
+ unsigned/**/char new_val)
+ {
+ return AO_char_fetch_compare_and_swap_write(addr, old_val, new_val)
+ == old_val;
+ }
+# define AO_HAVE_char_compare_and_swap_write
+#endif
+
+#if defined(AO_HAVE_char_fetch_compare_and_swap_read) \
+ && !defined(AO_HAVE_char_compare_and_swap_read)
+ AO_INLINE int
+ AO_char_compare_and_swap_read(volatile unsigned/**/char *addr, unsigned/**/char old_val,
+ unsigned/**/char new_val)
+ {
+ return AO_char_fetch_compare_and_swap_read(addr, old_val, new_val)
+ == old_val;
+ }
+# define AO_HAVE_char_compare_and_swap_read
+#endif
+
+#if defined(AO_HAVE_char_fetch_compare_and_swap) \
+ && !defined(AO_HAVE_char_compare_and_swap)
+ AO_INLINE int
+ AO_char_compare_and_swap(volatile unsigned/**/char *addr, unsigned/**/char old_val,
+ unsigned/**/char new_val)
+ {
+ return AO_char_fetch_compare_and_swap(addr, old_val, new_val) == old_val;
+ }
+# define AO_HAVE_char_compare_and_swap
+#endif
+
+#if defined(AO_HAVE_char_fetch_compare_and_swap_release_write) \
+ && !defined(AO_HAVE_char_compare_and_swap_release_write)
+ AO_INLINE int
+ AO_char_compare_and_swap_release_write(volatile unsigned/**/char *addr,
+ unsigned/**/char old_val, unsigned/**/char new_val)
+ {
+ return AO_char_fetch_compare_and_swap_release_write(addr, old_val,
+ new_val) == old_val;
+ }
+# define AO_HAVE_char_compare_and_swap_release_write
+#endif
+
+#if defined(AO_HAVE_char_fetch_compare_and_swap_acquire_read) \
+ && !defined(AO_HAVE_char_compare_and_swap_acquire_read)
+ AO_INLINE int
+ AO_char_compare_and_swap_acquire_read(volatile unsigned/**/char *addr,
+ unsigned/**/char old_val, unsigned/**/char new_val)
+ {
+ return AO_char_fetch_compare_and_swap_acquire_read(addr, old_val,
+ new_val) == old_val;
+ }
+# define AO_HAVE_char_compare_and_swap_acquire_read
+#endif
+
+#if defined(AO_HAVE_char_fetch_compare_and_swap_dd_acquire_read) \
+ && !defined(AO_HAVE_char_compare_and_swap_dd_acquire_read)
+ AO_INLINE int
+ AO_char_compare_and_swap_dd_acquire_read(volatile unsigned/**/char *addr,
+ unsigned/**/char old_val, unsigned/**/char new_val)
+ {
+ return AO_char_fetch_compare_and_swap_dd_acquire_read(addr, old_val,
+ new_val) == old_val;
+ }
+# define AO_HAVE_char_compare_and_swap_dd_acquire_read
+#endif
+
+/* char_fetch_and_add */
+/* We first try to implement fetch_and_add variants in terms of the */
+/* corresponding compare_and_swap variants to minimize adding barriers. */
+#if defined(AO_HAVE_char_compare_and_swap_full) \
+ && !defined(AO_HAVE_char_fetch_and_add_full)
+ AO_INLINE unsigned/**/char
+ AO_char_fetch_and_add_full(volatile unsigned/**/char *addr, unsigned/**/char incr)
+ {
+ unsigned/**/char old;
+
+ do
+ {
+ old = *(unsigned/**/char *)addr;
+ }
+ while (AO_EXPECT_FALSE(!AO_char_compare_and_swap_full(addr, old,
+ old + incr)));
+ return old;
+ }
+# define AO_HAVE_char_fetch_and_add_full
+#endif
+
+#if defined(AO_HAVE_char_compare_and_swap_acquire) \
+ && !defined(AO_HAVE_char_fetch_and_add_acquire)
+ AO_INLINE unsigned/**/char
+ AO_char_fetch_and_add_acquire(volatile unsigned/**/char *addr, unsigned/**/char incr)
+ {
+ unsigned/**/char old;
+
+ do
+ {
+ old = *(unsigned/**/char *)addr;
+ }
+ while (AO_EXPECT_FALSE(!AO_char_compare_and_swap_acquire(addr, old,
+ old + incr)));
+ return old;
+ }
+# define AO_HAVE_char_fetch_and_add_acquire
+#endif
+
+#if defined(AO_HAVE_char_compare_and_swap_release) \
+ && !defined(AO_HAVE_char_fetch_and_add_release)
+ AO_INLINE unsigned/**/char
+ AO_char_fetch_and_add_release(volatile unsigned/**/char *addr, unsigned/**/char incr)
+ {
+ unsigned/**/char old;
+
+ do
+ {
+ old = *(unsigned/**/char *)addr;
+ }
+ while (AO_EXPECT_FALSE(!AO_char_compare_and_swap_release(addr, old,
+ old + incr)));
+ return old;
+ }
+# define AO_HAVE_char_fetch_and_add_release
+#endif
+
+#if defined(AO_HAVE_char_compare_and_swap) \
+ && !defined(AO_HAVE_char_fetch_and_add)
+ AO_INLINE unsigned/**/char
+ AO_char_fetch_and_add(volatile unsigned/**/char *addr, unsigned/**/char incr)
+ {
+ unsigned/**/char old;
+
+ do
+ {
+ old = *(unsigned/**/char *)addr;
+ }
+ while (AO_EXPECT_FALSE(!AO_char_compare_and_swap(addr, old,
+ old + incr)));
+ return old;
+ }
+# define AO_HAVE_char_fetch_and_add
+#endif
+
+#if defined(AO_HAVE_char_fetch_and_add_full)
+# if !defined(AO_HAVE_char_fetch_and_add_release)
+# define AO_char_fetch_and_add_release(addr, val) \
+ AO_char_fetch_and_add_full(addr, val)
+# define AO_HAVE_char_fetch_and_add_release
+# endif
+# if !defined(AO_HAVE_char_fetch_and_add_acquire)
+# define AO_char_fetch_and_add_acquire(addr, val) \
+ AO_char_fetch_and_add_full(addr, val)
+# define AO_HAVE_char_fetch_and_add_acquire
+# endif
+# if !defined(AO_HAVE_char_fetch_and_add_write)
+# define AO_char_fetch_and_add_write(addr, val) \
+ AO_char_fetch_and_add_full(addr, val)
+# define AO_HAVE_char_fetch_and_add_write
+# endif
+# if !defined(AO_HAVE_char_fetch_and_add_read)
+# define AO_char_fetch_and_add_read(addr, val) \
+ AO_char_fetch_and_add_full(addr, val)
+# define AO_HAVE_char_fetch_and_add_read
+# endif
+#endif /* AO_HAVE_char_fetch_and_add_full */
+
+#if defined(AO_HAVE_char_fetch_and_add) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_char_fetch_and_add_acquire)
+ AO_INLINE unsigned/**/char
+ AO_char_fetch_and_add_acquire(volatile unsigned/**/char *addr, unsigned/**/char incr)
+ {
+ unsigned/**/char result = AO_char_fetch_and_add(addr, incr);
+ AO_nop_full();
+ return result;
+ }
+# define AO_HAVE_char_fetch_and_add_acquire
+#endif
+#if defined(AO_HAVE_char_fetch_and_add) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_char_fetch_and_add_release)
+# define AO_char_fetch_and_add_release(addr, incr) \
+ (AO_nop_full(), AO_char_fetch_and_add(addr, incr))
+# define AO_HAVE_char_fetch_and_add_release
+#endif
+
+#if !defined(AO_HAVE_char_fetch_and_add) \
+ && defined(AO_HAVE_char_fetch_and_add_release)
+# define AO_char_fetch_and_add(addr, val) \
+ AO_char_fetch_and_add_release(addr, val)
+# define AO_HAVE_char_fetch_and_add
+#endif
+#if !defined(AO_HAVE_char_fetch_and_add) \
+ && defined(AO_HAVE_char_fetch_and_add_acquire)
+# define AO_char_fetch_and_add(addr, val) \
+ AO_char_fetch_and_add_acquire(addr, val)
+# define AO_HAVE_char_fetch_and_add
+#endif
+#if !defined(AO_HAVE_char_fetch_and_add) \
+ && defined(AO_HAVE_char_fetch_and_add_write)
+# define AO_char_fetch_and_add(addr, val) \
+ AO_char_fetch_and_add_write(addr, val)
+# define AO_HAVE_char_fetch_and_add
+#endif
+#if !defined(AO_HAVE_char_fetch_and_add) \
+ && defined(AO_HAVE_char_fetch_and_add_read)
+# define AO_char_fetch_and_add(addr, val) \
+ AO_char_fetch_and_add_read(addr, val)
+# define AO_HAVE_char_fetch_and_add
+#endif
+
+#if defined(AO_HAVE_char_fetch_and_add_acquire) \
+ && defined(AO_HAVE_nop_full) && !defined(AO_HAVE_char_fetch_and_add_full)
+# define AO_char_fetch_and_add_full(addr, val) \
+ (AO_nop_full(), AO_char_fetch_and_add_acquire(addr, val))
+# define AO_HAVE_char_fetch_and_add_full
+#endif
+
+#if !defined(AO_HAVE_char_fetch_and_add_release_write) \
+ && defined(AO_HAVE_char_fetch_and_add_write)
+# define AO_char_fetch_and_add_release_write(addr, val) \
+ AO_char_fetch_and_add_write(addr, val)
+# define AO_HAVE_char_fetch_and_add_release_write
+#endif
+#if !defined(AO_HAVE_char_fetch_and_add_release_write) \
+ && defined(AO_HAVE_char_fetch_and_add_release)
+# define AO_char_fetch_and_add_release_write(addr, val) \
+ AO_char_fetch_and_add_release(addr, val)
+# define AO_HAVE_char_fetch_and_add_release_write
+#endif
+
+#if !defined(AO_HAVE_char_fetch_and_add_acquire_read) \
+ && defined(AO_HAVE_char_fetch_and_add_read)
+# define AO_char_fetch_and_add_acquire_read(addr, val) \
+ AO_char_fetch_and_add_read(addr, val)
+# define AO_HAVE_char_fetch_and_add_acquire_read
+#endif
+#if !defined(AO_HAVE_char_fetch_and_add_acquire_read) \
+ && defined(AO_HAVE_char_fetch_and_add_acquire)
+# define AO_char_fetch_and_add_acquire_read(addr, val) \
+ AO_char_fetch_and_add_acquire(addr, val)
+# define AO_HAVE_char_fetch_and_add_acquire_read
+#endif
+
+#ifdef AO_NO_DD_ORDERING
+# if defined(AO_HAVE_char_fetch_and_add_acquire_read)
+# define AO_char_fetch_and_add_dd_acquire_read(addr, val) \
+ AO_char_fetch_and_add_acquire_read(addr, val)
+# define AO_HAVE_char_fetch_and_add_dd_acquire_read
+# endif
+#else
+# if defined(AO_HAVE_char_fetch_and_add)
+# define AO_char_fetch_and_add_dd_acquire_read(addr, val) \
+ AO_char_fetch_and_add(addr, val)
+# define AO_HAVE_char_fetch_and_add_dd_acquire_read
+# endif
+#endif /* !AO_NO_DD_ORDERING */
+
+/* char_fetch_and_add1 */
+#if defined(AO_HAVE_char_fetch_and_add_full) \
+ && !defined(AO_HAVE_char_fetch_and_add1_full)
+# define AO_char_fetch_and_add1_full(addr) \
+ AO_char_fetch_and_add_full(addr, 1)
+# define AO_HAVE_char_fetch_and_add1_full
+#endif
+#if defined(AO_HAVE_char_fetch_and_add_release) \
+ && !defined(AO_HAVE_char_fetch_and_add1_release)
+# define AO_char_fetch_and_add1_release(addr) \
+ AO_char_fetch_and_add_release(addr, 1)
+# define AO_HAVE_char_fetch_and_add1_release
+#endif
+#if defined(AO_HAVE_char_fetch_and_add_acquire) \
+ && !defined(AO_HAVE_char_fetch_and_add1_acquire)
+# define AO_char_fetch_and_add1_acquire(addr) \
+ AO_char_fetch_and_add_acquire(addr, 1)
+# define AO_HAVE_char_fetch_and_add1_acquire
+#endif
+#if defined(AO_HAVE_char_fetch_and_add_write) \
+ && !defined(AO_HAVE_char_fetch_and_add1_write)
+# define AO_char_fetch_and_add1_write(addr) \
+ AO_char_fetch_and_add_write(addr, 1)
+# define AO_HAVE_char_fetch_and_add1_write
+#endif
+#if defined(AO_HAVE_char_fetch_and_add_read) \
+ && !defined(AO_HAVE_char_fetch_and_add1_read)
+# define AO_char_fetch_and_add1_read(addr) \
+ AO_char_fetch_and_add_read(addr, 1)
+# define AO_HAVE_char_fetch_and_add1_read
+#endif
+#if defined(AO_HAVE_char_fetch_and_add_release_write) \
+ && !defined(AO_HAVE_char_fetch_and_add1_release_write)
+# define AO_char_fetch_and_add1_release_write(addr) \
+ AO_char_fetch_and_add_release_write(addr, 1)
+# define AO_HAVE_char_fetch_and_add1_release_write
+#endif
+#if defined(AO_HAVE_char_fetch_and_add_acquire_read) \
+ && !defined(AO_HAVE_char_fetch_and_add1_acquire_read)
+# define AO_char_fetch_and_add1_acquire_read(addr) \
+ AO_char_fetch_and_add_acquire_read(addr, 1)
+# define AO_HAVE_char_fetch_and_add1_acquire_read
+#endif
+#if defined(AO_HAVE_char_fetch_and_add) \
+ && !defined(AO_HAVE_char_fetch_and_add1)
+# define AO_char_fetch_and_add1(addr) AO_char_fetch_and_add(addr, 1)
+# define AO_HAVE_char_fetch_and_add1
+#endif
+
+#if defined(AO_HAVE_char_fetch_and_add1_full)
+# if !defined(AO_HAVE_char_fetch_and_add1_release)
+# define AO_char_fetch_and_add1_release(addr) \
+ AO_char_fetch_and_add1_full(addr)
+# define AO_HAVE_char_fetch_and_add1_release
+# endif
+# if !defined(AO_HAVE_char_fetch_and_add1_acquire)
+# define AO_char_fetch_and_add1_acquire(addr) \
+ AO_char_fetch_and_add1_full(addr)
+# define AO_HAVE_char_fetch_and_add1_acquire
+# endif
+# if !defined(AO_HAVE_char_fetch_and_add1_write)
+# define AO_char_fetch_and_add1_write(addr) \
+ AO_char_fetch_and_add1_full(addr)
+# define AO_HAVE_char_fetch_and_add1_write
+# endif
+# if !defined(AO_HAVE_char_fetch_and_add1_read)
+# define AO_char_fetch_and_add1_read(addr) \
+ AO_char_fetch_and_add1_full(addr)
+# define AO_HAVE_char_fetch_and_add1_read
+# endif
+#endif /* AO_HAVE_char_fetch_and_add1_full */
+
+#if !defined(AO_HAVE_char_fetch_and_add1) \
+ && defined(AO_HAVE_char_fetch_and_add1_release)
+# define AO_char_fetch_and_add1(addr) AO_char_fetch_and_add1_release(addr)
+# define AO_HAVE_char_fetch_and_add1
+#endif
+#if !defined(AO_HAVE_char_fetch_and_add1) \
+ && defined(AO_HAVE_char_fetch_and_add1_acquire)
+# define AO_char_fetch_and_add1(addr) AO_char_fetch_and_add1_acquire(addr)
+# define AO_HAVE_char_fetch_and_add1
+#endif
+#if !defined(AO_HAVE_char_fetch_and_add1) \
+ && defined(AO_HAVE_char_fetch_and_add1_write)
+# define AO_char_fetch_and_add1(addr) AO_char_fetch_and_add1_write(addr)
+# define AO_HAVE_char_fetch_and_add1
+#endif
+#if !defined(AO_HAVE_char_fetch_and_add1) \
+ && defined(AO_HAVE_char_fetch_and_add1_read)
+# define AO_char_fetch_and_add1(addr) AO_char_fetch_and_add1_read(addr)
+# define AO_HAVE_char_fetch_and_add1
+#endif
+
+#if defined(AO_HAVE_char_fetch_and_add1_acquire) \
+ && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_char_fetch_and_add1_full)
+# define AO_char_fetch_and_add1_full(addr) \
+ (AO_nop_full(), AO_char_fetch_and_add1_acquire(addr))
+# define AO_HAVE_char_fetch_and_add1_full
+#endif
+
+#if !defined(AO_HAVE_char_fetch_and_add1_release_write) \
+ && defined(AO_HAVE_char_fetch_and_add1_write)
+# define AO_char_fetch_and_add1_release_write(addr) \
+ AO_char_fetch_and_add1_write(addr)
+# define AO_HAVE_char_fetch_and_add1_release_write
+#endif
+#if !defined(AO_HAVE_char_fetch_and_add1_release_write) \
+ && defined(AO_HAVE_char_fetch_and_add1_release)
+# define AO_char_fetch_and_add1_release_write(addr) \
+ AO_char_fetch_and_add1_release(addr)
+# define AO_HAVE_char_fetch_and_add1_release_write
+#endif
+#if !defined(AO_HAVE_char_fetch_and_add1_acquire_read) \
+ && defined(AO_HAVE_char_fetch_and_add1_read)
+# define AO_char_fetch_and_add1_acquire_read(addr) \
+ AO_char_fetch_and_add1_read(addr)
+# define AO_HAVE_char_fetch_and_add1_acquire_read
+#endif
+#if !defined(AO_HAVE_char_fetch_and_add1_acquire_read) \
+ && defined(AO_HAVE_char_fetch_and_add1_acquire)
+# define AO_char_fetch_and_add1_acquire_read(addr) \
+ AO_char_fetch_and_add1_acquire(addr)
+# define AO_HAVE_char_fetch_and_add1_acquire_read
+#endif
+
+#ifdef AO_NO_DD_ORDERING
+# if defined(AO_HAVE_char_fetch_and_add1_acquire_read)
+# define AO_char_fetch_and_add1_dd_acquire_read(addr) \
+ AO_char_fetch_and_add1_acquire_read(addr)
+# define AO_HAVE_char_fetch_and_add1_dd_acquire_read
+# endif
+#else
+# if defined(AO_HAVE_char_fetch_and_add1)
+# define AO_char_fetch_and_add1_dd_acquire_read(addr) \
+ AO_char_fetch_and_add1(addr)
+# define AO_HAVE_char_fetch_and_add1_dd_acquire_read
+# endif
+#endif /* !AO_NO_DD_ORDERING */
+
+/* char_fetch_and_sub1 */
+#if defined(AO_HAVE_char_fetch_and_add_full) \
+ && !defined(AO_HAVE_char_fetch_and_sub1_full)
+# define AO_char_fetch_and_sub1_full(addr) \
+ AO_char_fetch_and_add_full(addr, (unsigned/**/char)(-1))
+# define AO_HAVE_char_fetch_and_sub1_full
+#endif
+#if defined(AO_HAVE_char_fetch_and_add_release) \
+ && !defined(AO_HAVE_char_fetch_and_sub1_release)
+# define AO_char_fetch_and_sub1_release(addr) \
+ AO_char_fetch_and_add_release(addr, (unsigned/**/char)(-1))
+# define AO_HAVE_char_fetch_and_sub1_release
+#endif
+#if defined(AO_HAVE_char_fetch_and_add_acquire) \
+ && !defined(AO_HAVE_char_fetch_and_sub1_acquire)
+# define AO_char_fetch_and_sub1_acquire(addr) \
+ AO_char_fetch_and_add_acquire(addr, (unsigned/**/char)(-1))
+# define AO_HAVE_char_fetch_and_sub1_acquire
+#endif
+#if defined(AO_HAVE_char_fetch_and_add_write) \
+ && !defined(AO_HAVE_char_fetch_and_sub1_write)
+# define AO_char_fetch_and_sub1_write(addr) \
+ AO_char_fetch_and_add_write(addr, (unsigned/**/char)(-1))
+# define AO_HAVE_char_fetch_and_sub1_write
+#endif
+#if defined(AO_HAVE_char_fetch_and_add_read) \
+ && !defined(AO_HAVE_char_fetch_and_sub1_read)
+# define AO_char_fetch_and_sub1_read(addr) \
+ AO_char_fetch_and_add_read(addr, (unsigned/**/char)(-1))
+# define AO_HAVE_char_fetch_and_sub1_read
+#endif
+#if defined(AO_HAVE_char_fetch_and_add_release_write) \
+ && !defined(AO_HAVE_char_fetch_and_sub1_release_write)
+# define AO_char_fetch_and_sub1_release_write(addr) \
+ AO_char_fetch_and_add_release_write(addr, (unsigned/**/char)(-1))
+# define AO_HAVE_char_fetch_and_sub1_release_write
+#endif
+#if defined(AO_HAVE_char_fetch_and_add_acquire_read) \
+ && !defined(AO_HAVE_char_fetch_and_sub1_acquire_read)
+# define AO_char_fetch_and_sub1_acquire_read(addr) \
+ AO_char_fetch_and_add_acquire_read(addr, (unsigned/**/char)(-1))
+# define AO_HAVE_char_fetch_and_sub1_acquire_read
+#endif
+#if defined(AO_HAVE_char_fetch_and_add) \
+ && !defined(AO_HAVE_char_fetch_and_sub1)
+# define AO_char_fetch_and_sub1(addr) \
+ AO_char_fetch_and_add(addr, (unsigned/**/char)(-1))
+# define AO_HAVE_char_fetch_and_sub1
+#endif
+
+#if defined(AO_HAVE_char_fetch_and_sub1_full)
+# if !defined(AO_HAVE_char_fetch_and_sub1_release)
+# define AO_char_fetch_and_sub1_release(addr) \
+ AO_char_fetch_and_sub1_full(addr)
+# define AO_HAVE_char_fetch_and_sub1_release
+# endif
+# if !defined(AO_HAVE_char_fetch_and_sub1_acquire)
+# define AO_char_fetch_and_sub1_acquire(addr) \
+ AO_char_fetch_and_sub1_full(addr)
+# define AO_HAVE_char_fetch_and_sub1_acquire
+# endif
+# if !defined(AO_HAVE_char_fetch_and_sub1_write)
+# define AO_char_fetch_and_sub1_write(addr) \
+ AO_char_fetch_and_sub1_full(addr)
+# define AO_HAVE_char_fetch_and_sub1_write
+# endif
+# if !defined(AO_HAVE_char_fetch_and_sub1_read)
+# define AO_char_fetch_and_sub1_read(addr) \
+ AO_char_fetch_and_sub1_full(addr)
+# define AO_HAVE_char_fetch_and_sub1_read
+# endif
+#endif /* AO_HAVE_char_fetch_and_sub1_full */
+
+#if !defined(AO_HAVE_char_fetch_and_sub1) \
+ && defined(AO_HAVE_char_fetch_and_sub1_release)
+# define AO_char_fetch_and_sub1(addr) AO_char_fetch_and_sub1_release(addr)
+# define AO_HAVE_char_fetch_and_sub1
+#endif
+#if !defined(AO_HAVE_char_fetch_and_sub1) \
+ && defined(AO_HAVE_char_fetch_and_sub1_acquire)
+# define AO_char_fetch_and_sub1(addr) AO_char_fetch_and_sub1_acquire(addr)
+# define AO_HAVE_char_fetch_and_sub1
+#endif
+#if !defined(AO_HAVE_char_fetch_and_sub1) \
+ && defined(AO_HAVE_char_fetch_and_sub1_write)
+# define AO_char_fetch_and_sub1(addr) AO_char_fetch_and_sub1_write(addr)
+# define AO_HAVE_char_fetch_and_sub1
+#endif
+#if !defined(AO_HAVE_char_fetch_and_sub1) \
+ && defined(AO_HAVE_char_fetch_and_sub1_read)
+# define AO_char_fetch_and_sub1(addr) AO_char_fetch_and_sub1_read(addr)
+# define AO_HAVE_char_fetch_and_sub1
+#endif
+
+#if defined(AO_HAVE_char_fetch_and_sub1_acquire) \
+ && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_char_fetch_and_sub1_full)
+# define AO_char_fetch_and_sub1_full(addr) \
+ (AO_nop_full(), AO_char_fetch_and_sub1_acquire(addr))
+# define AO_HAVE_char_fetch_and_sub1_full
+#endif
+
+#if !defined(AO_HAVE_char_fetch_and_sub1_release_write) \
+ && defined(AO_HAVE_char_fetch_and_sub1_write)
+# define AO_char_fetch_and_sub1_release_write(addr) \
+ AO_char_fetch_and_sub1_write(addr)
+# define AO_HAVE_char_fetch_and_sub1_release_write
+#endif
+#if !defined(AO_HAVE_char_fetch_and_sub1_release_write) \
+ && defined(AO_HAVE_char_fetch_and_sub1_release)
+# define AO_char_fetch_and_sub1_release_write(addr) \
+ AO_char_fetch_and_sub1_release(addr)
+# define AO_HAVE_char_fetch_and_sub1_release_write
+#endif
+#if !defined(AO_HAVE_char_fetch_and_sub1_acquire_read) \
+ && defined(AO_HAVE_char_fetch_and_sub1_read)
+# define AO_char_fetch_and_sub1_acquire_read(addr) \
+ AO_char_fetch_and_sub1_read(addr)
+# define AO_HAVE_char_fetch_and_sub1_acquire_read
+#endif
+#if !defined(AO_HAVE_char_fetch_and_sub1_acquire_read) \
+ && defined(AO_HAVE_char_fetch_and_sub1_acquire)
+# define AO_char_fetch_and_sub1_acquire_read(addr) \
+ AO_char_fetch_and_sub1_acquire(addr)
+# define AO_HAVE_char_fetch_and_sub1_acquire_read
+#endif
+
+#ifdef AO_NO_DD_ORDERING
+# if defined(AO_HAVE_char_fetch_and_sub1_acquire_read)
+# define AO_char_fetch_and_sub1_dd_acquire_read(addr) \
+ AO_char_fetch_and_sub1_acquire_read(addr)
+# define AO_HAVE_char_fetch_and_sub1_dd_acquire_read
+# endif
+#else
+# if defined(AO_HAVE_char_fetch_and_sub1)
+# define AO_char_fetch_and_sub1_dd_acquire_read(addr) \
+ AO_char_fetch_and_sub1(addr)
+# define AO_HAVE_char_fetch_and_sub1_dd_acquire_read
+# endif
+#endif /* !AO_NO_DD_ORDERING */
+
+/* char_and */
+#if defined(AO_HAVE_char_compare_and_swap_full) \
+ && !defined(AO_HAVE_char_and_full)
+ AO_INLINE void
+ AO_char_and_full(volatile unsigned/**/char *addr, unsigned/**/char value)
+ {
+ unsigned/**/char old;
+
+ do
+ {
+ old = *(unsigned/**/char *)addr;
+ }
+ while (AO_EXPECT_FALSE(!AO_char_compare_and_swap_full(addr, old,
+ old & value)));
+ }
+# define AO_HAVE_char_and_full
+#endif
+
+#if defined(AO_HAVE_char_and_full)
+# if !defined(AO_HAVE_char_and_release)
+# define AO_char_and_release(addr, val) AO_char_and_full(addr, val)
+# define AO_HAVE_char_and_release
+# endif
+# if !defined(AO_HAVE_char_and_acquire)
+# define AO_char_and_acquire(addr, val) AO_char_and_full(addr, val)
+# define AO_HAVE_char_and_acquire
+# endif
+# if !defined(AO_HAVE_char_and_write)
+# define AO_char_and_write(addr, val) AO_char_and_full(addr, val)
+# define AO_HAVE_char_and_write
+# endif
+# if !defined(AO_HAVE_char_and_read)
+# define AO_char_and_read(addr, val) AO_char_and_full(addr, val)
+# define AO_HAVE_char_and_read
+# endif
+#endif /* AO_HAVE_char_and_full */
+
+#if !defined(AO_HAVE_char_and) && defined(AO_HAVE_char_and_release)
+# define AO_char_and(addr, val) AO_char_and_release(addr, val)
+# define AO_HAVE_char_and
+#endif
+#if !defined(AO_HAVE_char_and) && defined(AO_HAVE_char_and_acquire)
+# define AO_char_and(addr, val) AO_char_and_acquire(addr, val)
+# define AO_HAVE_char_and
+#endif
+#if !defined(AO_HAVE_char_and) && defined(AO_HAVE_char_and_write)
+# define AO_char_and(addr, val) AO_char_and_write(addr, val)
+# define AO_HAVE_char_and
+#endif
+#if !defined(AO_HAVE_char_and) && defined(AO_HAVE_char_and_read)
+# define AO_char_and(addr, val) AO_char_and_read(addr, val)
+# define AO_HAVE_char_and
+#endif
+
+#if defined(AO_HAVE_char_and_acquire) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_char_and_full)
+# define AO_char_and_full(addr, val) \
+ (AO_nop_full(), AO_char_and_acquire(addr, val))
+# define AO_HAVE_char_and_full
+#endif
+
+#if !defined(AO_HAVE_char_and_release_write) \
+ && defined(AO_HAVE_char_and_write)
+# define AO_char_and_release_write(addr, val) AO_char_and_write(addr, val)
+# define AO_HAVE_char_and_release_write
+#endif
+#if !defined(AO_HAVE_char_and_release_write) \
+ && defined(AO_HAVE_char_and_release)
+# define AO_char_and_release_write(addr, val) AO_char_and_release(addr, val)
+# define AO_HAVE_char_and_release_write
+#endif
+#if !defined(AO_HAVE_char_and_acquire_read) \
+ && defined(AO_HAVE_char_and_read)
+# define AO_char_and_acquire_read(addr, val) AO_char_and_read(addr, val)
+# define AO_HAVE_char_and_acquire_read
+#endif
+#if !defined(AO_HAVE_char_and_acquire_read) \
+ && defined(AO_HAVE_char_and_acquire)
+# define AO_char_and_acquire_read(addr, val) AO_char_and_acquire(addr, val)
+# define AO_HAVE_char_and_acquire_read
+#endif
+
+/* char_or */
+#if defined(AO_HAVE_char_compare_and_swap_full) \
+ && !defined(AO_HAVE_char_or_full)
+ AO_INLINE void
+ AO_char_or_full(volatile unsigned/**/char *addr, unsigned/**/char value)
+ {
+ unsigned/**/char old;
+
+ do
+ {
+ old = *(unsigned/**/char *)addr;
+ }
+ while (AO_EXPECT_FALSE(!AO_char_compare_and_swap_full(addr, old,
+ old | value)));
+ }
+# define AO_HAVE_char_or_full
+#endif
+
+#if defined(AO_HAVE_char_or_full)
+# if !defined(AO_HAVE_char_or_release)
+# define AO_char_or_release(addr, val) AO_char_or_full(addr, val)
+# define AO_HAVE_char_or_release
+# endif
+# if !defined(AO_HAVE_char_or_acquire)
+# define AO_char_or_acquire(addr, val) AO_char_or_full(addr, val)
+# define AO_HAVE_char_or_acquire
+# endif
+# if !defined(AO_HAVE_char_or_write)
+# define AO_char_or_write(addr, val) AO_char_or_full(addr, val)
+# define AO_HAVE_char_or_write
+# endif
+# if !defined(AO_HAVE_char_or_read)
+# define AO_char_or_read(addr, val) AO_char_or_full(addr, val)
+# define AO_HAVE_char_or_read
+# endif
+#endif /* AO_HAVE_char_or_full */
+
+#if !defined(AO_HAVE_char_or) && defined(AO_HAVE_char_or_release)
+# define AO_char_or(addr, val) AO_char_or_release(addr, val)
+# define AO_HAVE_char_or
+#endif
+#if !defined(AO_HAVE_char_or) && defined(AO_HAVE_char_or_acquire)
+# define AO_char_or(addr, val) AO_char_or_acquire(addr, val)
+# define AO_HAVE_char_or
+#endif
+#if !defined(AO_HAVE_char_or) && defined(AO_HAVE_char_or_write)
+# define AO_char_or(addr, val) AO_char_or_write(addr, val)
+# define AO_HAVE_char_or
+#endif
+#if !defined(AO_HAVE_char_or) && defined(AO_HAVE_char_or_read)
+# define AO_char_or(addr, val) AO_char_or_read(addr, val)
+# define AO_HAVE_char_or
+#endif
+
+#if defined(AO_HAVE_char_or_acquire) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_char_or_full)
+# define AO_char_or_full(addr, val) \
+ (AO_nop_full(), AO_char_or_acquire(addr, val))
+# define AO_HAVE_char_or_full
+#endif
+
+#if !defined(AO_HAVE_char_or_release_write) \
+ && defined(AO_HAVE_char_or_write)
+# define AO_char_or_release_write(addr, val) AO_char_or_write(addr, val)
+# define AO_HAVE_char_or_release_write
+#endif
+#if !defined(AO_HAVE_char_or_release_write) \
+ && defined(AO_HAVE_char_or_release)
+# define AO_char_or_release_write(addr, val) AO_char_or_release(addr, val)
+# define AO_HAVE_char_or_release_write
+#endif
+#if !defined(AO_HAVE_char_or_acquire_read) && defined(AO_HAVE_char_or_read)
+# define AO_char_or_acquire_read(addr, val) AO_char_or_read(addr, val)
+# define AO_HAVE_char_or_acquire_read
+#endif
+#if !defined(AO_HAVE_char_or_acquire_read) \
+ && defined(AO_HAVE_char_or_acquire)
+# define AO_char_or_acquire_read(addr, val) AO_char_or_acquire(addr, val)
+# define AO_HAVE_char_or_acquire_read
+#endif
+
+/* char_xor */
+#if defined(AO_HAVE_char_compare_and_swap_full) \
+ && !defined(AO_HAVE_char_xor_full)
+ AO_INLINE void
+ AO_char_xor_full(volatile unsigned/**/char *addr, unsigned/**/char value)
+ {
+ unsigned/**/char old;
+
+ do
+ {
+ old = *(unsigned/**/char *)addr;
+ }
+ while (AO_EXPECT_FALSE(!AO_char_compare_and_swap_full(addr, old,
+ old ^ value)));
+ }
+# define AO_HAVE_char_xor_full
+#endif
+
+#if defined(AO_HAVE_char_xor_full)
+# if !defined(AO_HAVE_char_xor_release)
+# define AO_char_xor_release(addr, val) AO_char_xor_full(addr, val)
+# define AO_HAVE_char_xor_release
+# endif
+# if !defined(AO_HAVE_char_xor_acquire)
+# define AO_char_xor_acquire(addr, val) AO_char_xor_full(addr, val)
+# define AO_HAVE_char_xor_acquire
+# endif
+# if !defined(AO_HAVE_char_xor_write)
+# define AO_char_xor_write(addr, val) AO_char_xor_full(addr, val)
+# define AO_HAVE_char_xor_write
+# endif
+# if !defined(AO_HAVE_char_xor_read)
+# define AO_char_xor_read(addr, val) AO_char_xor_full(addr, val)
+# define AO_HAVE_char_xor_read
+# endif
+#endif /* AO_HAVE_char_xor_full */
+
+#if !defined(AO_HAVE_char_xor) && defined(AO_HAVE_char_xor_release)
+# define AO_char_xor(addr, val) AO_char_xor_release(addr, val)
+# define AO_HAVE_char_xor
+#endif
+#if !defined(AO_HAVE_char_xor) && defined(AO_HAVE_char_xor_acquire)
+# define AO_char_xor(addr, val) AO_char_xor_acquire(addr, val)
+# define AO_HAVE_char_xor
+#endif
+#if !defined(AO_HAVE_char_xor) && defined(AO_HAVE_char_xor_write)
+# define AO_char_xor(addr, val) AO_char_xor_write(addr, val)
+# define AO_HAVE_char_xor
+#endif
+#if !defined(AO_HAVE_char_xor) && defined(AO_HAVE_char_xor_read)
+# define AO_char_xor(addr, val) AO_char_xor_read(addr, val)
+# define AO_HAVE_char_xor
+#endif
+
+#if defined(AO_HAVE_char_xor_acquire) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_char_xor_full)
+# define AO_char_xor_full(addr, val) \
+ (AO_nop_full(), AO_char_xor_acquire(addr, val))
+# define AO_HAVE_char_xor_full
+#endif
+
+#if !defined(AO_HAVE_char_xor_release_write) \
+ && defined(AO_HAVE_char_xor_write)
+# define AO_char_xor_release_write(addr, val) AO_char_xor_write(addr, val)
+# define AO_HAVE_char_xor_release_write
+#endif
+#if !defined(AO_HAVE_char_xor_release_write) \
+ && defined(AO_HAVE_char_xor_release)
+# define AO_char_xor_release_write(addr, val) AO_char_xor_release(addr, val)
+# define AO_HAVE_char_xor_release_write
+#endif
+#if !defined(AO_HAVE_char_xor_acquire_read) \
+ && defined(AO_HAVE_char_xor_read)
+# define AO_char_xor_acquire_read(addr, val) AO_char_xor_read(addr, val)
+# define AO_HAVE_char_xor_acquire_read
+#endif
+#if !defined(AO_HAVE_char_xor_acquire_read) \
+ && defined(AO_HAVE_char_xor_acquire)
+# define AO_char_xor_acquire_read(addr, val) AO_char_xor_acquire(addr, val)
+# define AO_HAVE_char_xor_acquire_read
+#endif
+
+/* char_and/or/xor_dd_acquire_read are meaningless. */
+/*
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* short_compare_and_swap (based on fetch_compare_and_swap) */
+#if defined(AO_HAVE_short_fetch_compare_and_swap_full) \
+ && !defined(AO_HAVE_short_compare_and_swap_full)
+ AO_INLINE int
+ AO_short_compare_and_swap_full(volatile unsigned/**/short *addr, unsigned/**/short old_val,
+ unsigned/**/short new_val)
+ {
+ return AO_short_fetch_compare_and_swap_full(addr, old_val, new_val)
+ == old_val;
+ }
+# define AO_HAVE_short_compare_and_swap_full
+#endif
+
+#if defined(AO_HAVE_short_fetch_compare_and_swap_acquire) \
+ && !defined(AO_HAVE_short_compare_and_swap_acquire)
+ AO_INLINE int
+ AO_short_compare_and_swap_acquire(volatile unsigned/**/short *addr, unsigned/**/short old_val,
+ unsigned/**/short new_val)
+ {
+ return AO_short_fetch_compare_and_swap_acquire(addr, old_val, new_val)
+ == old_val;
+ }
+# define AO_HAVE_short_compare_and_swap_acquire
+#endif
+
+#if defined(AO_HAVE_short_fetch_compare_and_swap_release) \
+ && !defined(AO_HAVE_short_compare_and_swap_release)
+ AO_INLINE int
+ AO_short_compare_and_swap_release(volatile unsigned/**/short *addr, unsigned/**/short old_val,
+ unsigned/**/short new_val)
+ {
+ return AO_short_fetch_compare_and_swap_release(addr, old_val, new_val)
+ == old_val;
+ }
+# define AO_HAVE_short_compare_and_swap_release
+#endif
+
+#if defined(AO_HAVE_short_fetch_compare_and_swap_write) \
+ && !defined(AO_HAVE_short_compare_and_swap_write)
+ AO_INLINE int
+ AO_short_compare_and_swap_write(volatile unsigned/**/short *addr, unsigned/**/short old_val,
+ unsigned/**/short new_val)
+ {
+ return AO_short_fetch_compare_and_swap_write(addr, old_val, new_val)
+ == old_val;
+ }
+# define AO_HAVE_short_compare_and_swap_write
+#endif
+
+#if defined(AO_HAVE_short_fetch_compare_and_swap_read) \
+ && !defined(AO_HAVE_short_compare_and_swap_read)
+ AO_INLINE int
+ AO_short_compare_and_swap_read(volatile unsigned/**/short *addr, unsigned/**/short old_val,
+ unsigned/**/short new_val)
+ {
+ return AO_short_fetch_compare_and_swap_read(addr, old_val, new_val)
+ == old_val;
+ }
+# define AO_HAVE_short_compare_and_swap_read
+#endif
+
+#if defined(AO_HAVE_short_fetch_compare_and_swap) \
+ && !defined(AO_HAVE_short_compare_and_swap)
+ AO_INLINE int
+ AO_short_compare_and_swap(volatile unsigned/**/short *addr, unsigned/**/short old_val,
+ unsigned/**/short new_val)
+ {
+ return AO_short_fetch_compare_and_swap(addr, old_val, new_val) == old_val;
+ }
+# define AO_HAVE_short_compare_and_swap
+#endif
+
+#if defined(AO_HAVE_short_fetch_compare_and_swap_release_write) \
+ && !defined(AO_HAVE_short_compare_and_swap_release_write)
+ AO_INLINE int
+ AO_short_compare_and_swap_release_write(volatile unsigned/**/short *addr,
+ unsigned/**/short old_val, unsigned/**/short new_val)
+ {
+ return AO_short_fetch_compare_and_swap_release_write(addr, old_val,
+ new_val) == old_val;
+ }
+# define AO_HAVE_short_compare_and_swap_release_write
+#endif
+
+#if defined(AO_HAVE_short_fetch_compare_and_swap_acquire_read) \
+ && !defined(AO_HAVE_short_compare_and_swap_acquire_read)
+ AO_INLINE int
+ AO_short_compare_and_swap_acquire_read(volatile unsigned/**/short *addr,
+ unsigned/**/short old_val, unsigned/**/short new_val)
+ {
+ return AO_short_fetch_compare_and_swap_acquire_read(addr, old_val,
+ new_val) == old_val;
+ }
+# define AO_HAVE_short_compare_and_swap_acquire_read
+#endif
+
+#if defined(AO_HAVE_short_fetch_compare_and_swap_dd_acquire_read) \
+ && !defined(AO_HAVE_short_compare_and_swap_dd_acquire_read)
+ AO_INLINE int
+ AO_short_compare_and_swap_dd_acquire_read(volatile unsigned/**/short *addr,
+ unsigned/**/short old_val, unsigned/**/short new_val)
+ {
+ return AO_short_fetch_compare_and_swap_dd_acquire_read(addr, old_val,
+ new_val) == old_val;
+ }
+# define AO_HAVE_short_compare_and_swap_dd_acquire_read
+#endif
+
+/* short_fetch_and_add */
+/* We first try to implement fetch_and_add variants in terms of the */
+/* corresponding compare_and_swap variants to minimize adding barriers. */
+#if defined(AO_HAVE_short_compare_and_swap_full) \
+ && !defined(AO_HAVE_short_fetch_and_add_full)
+ AO_INLINE unsigned/**/short
+ AO_short_fetch_and_add_full(volatile unsigned/**/short *addr, unsigned/**/short incr)
+ {
+ unsigned/**/short old;
+
+ do
+ {
+ old = *(unsigned/**/short *)addr;
+ }
+ while (AO_EXPECT_FALSE(!AO_short_compare_and_swap_full(addr, old,
+ old + incr)));
+ return old;
+ }
+# define AO_HAVE_short_fetch_and_add_full
+#endif
+
+#if defined(AO_HAVE_short_compare_and_swap_acquire) \
+ && !defined(AO_HAVE_short_fetch_and_add_acquire)
+ AO_INLINE unsigned/**/short
+ AO_short_fetch_and_add_acquire(volatile unsigned/**/short *addr, unsigned/**/short incr)
+ {
+ unsigned/**/short old;
+
+ do
+ {
+ old = *(unsigned/**/short *)addr;
+ }
+ while (AO_EXPECT_FALSE(!AO_short_compare_and_swap_acquire(addr, old,
+ old + incr)));
+ return old;
+ }
+# define AO_HAVE_short_fetch_and_add_acquire
+#endif
+
+#if defined(AO_HAVE_short_compare_and_swap_release) \
+ && !defined(AO_HAVE_short_fetch_and_add_release)
+ AO_INLINE unsigned/**/short
+ AO_short_fetch_and_add_release(volatile unsigned/**/short *addr, unsigned/**/short incr)
+ {
+ unsigned/**/short old;
+
+ do
+ {
+ old = *(unsigned/**/short *)addr;
+ }
+ while (AO_EXPECT_FALSE(!AO_short_compare_and_swap_release(addr, old,
+ old + incr)));
+ return old;
+ }
+# define AO_HAVE_short_fetch_and_add_release
+#endif
+
+#if defined(AO_HAVE_short_compare_and_swap) \
+ && !defined(AO_HAVE_short_fetch_and_add)
+ AO_INLINE unsigned/**/short
+ AO_short_fetch_and_add(volatile unsigned/**/short *addr, unsigned/**/short incr)
+ {
+ unsigned/**/short old;
+
+ do
+ {
+ old = *(unsigned/**/short *)addr;
+ }
+ while (AO_EXPECT_FALSE(!AO_short_compare_and_swap(addr, old,
+ old + incr)));
+ return old;
+ }
+# define AO_HAVE_short_fetch_and_add
+#endif
+
+#if defined(AO_HAVE_short_fetch_and_add_full)
+# if !defined(AO_HAVE_short_fetch_and_add_release)
+# define AO_short_fetch_and_add_release(addr, val) \
+ AO_short_fetch_and_add_full(addr, val)
+# define AO_HAVE_short_fetch_and_add_release
+# endif
+# if !defined(AO_HAVE_short_fetch_and_add_acquire)
+# define AO_short_fetch_and_add_acquire(addr, val) \
+ AO_short_fetch_and_add_full(addr, val)
+# define AO_HAVE_short_fetch_and_add_acquire
+# endif
+# if !defined(AO_HAVE_short_fetch_and_add_write)
+# define AO_short_fetch_and_add_write(addr, val) \
+ AO_short_fetch_and_add_full(addr, val)
+# define AO_HAVE_short_fetch_and_add_write
+# endif
+# if !defined(AO_HAVE_short_fetch_and_add_read)
+# define AO_short_fetch_and_add_read(addr, val) \
+ AO_short_fetch_and_add_full(addr, val)
+# define AO_HAVE_short_fetch_and_add_read
+# endif
+#endif /* AO_HAVE_short_fetch_and_add_full */
+
+#if defined(AO_HAVE_short_fetch_and_add) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_short_fetch_and_add_acquire)
+ AO_INLINE unsigned/**/short
+ AO_short_fetch_and_add_acquire(volatile unsigned/**/short *addr, unsigned/**/short incr)
+ {
+ unsigned/**/short result = AO_short_fetch_and_add(addr, incr);
+ AO_nop_full();
+ return result;
+ }
+# define AO_HAVE_short_fetch_and_add_acquire
+#endif
+#if defined(AO_HAVE_short_fetch_and_add) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_short_fetch_and_add_release)
+# define AO_short_fetch_and_add_release(addr, incr) \
+ (AO_nop_full(), AO_short_fetch_and_add(addr, incr))
+# define AO_HAVE_short_fetch_and_add_release
+#endif
+
+#if !defined(AO_HAVE_short_fetch_and_add) \
+ && defined(AO_HAVE_short_fetch_and_add_release)
+# define AO_short_fetch_and_add(addr, val) \
+ AO_short_fetch_and_add_release(addr, val)
+# define AO_HAVE_short_fetch_and_add
+#endif
+#if !defined(AO_HAVE_short_fetch_and_add) \
+ && defined(AO_HAVE_short_fetch_and_add_acquire)
+# define AO_short_fetch_and_add(addr, val) \
+ AO_short_fetch_and_add_acquire(addr, val)
+# define AO_HAVE_short_fetch_and_add
+#endif
+#if !defined(AO_HAVE_short_fetch_and_add) \
+ && defined(AO_HAVE_short_fetch_and_add_write)
+# define AO_short_fetch_and_add(addr, val) \
+ AO_short_fetch_and_add_write(addr, val)
+# define AO_HAVE_short_fetch_and_add
+#endif
+#if !defined(AO_HAVE_short_fetch_and_add) \
+ && defined(AO_HAVE_short_fetch_and_add_read)
+# define AO_short_fetch_and_add(addr, val) \
+ AO_short_fetch_and_add_read(addr, val)
+# define AO_HAVE_short_fetch_and_add
+#endif
+
+#if defined(AO_HAVE_short_fetch_and_add_acquire) \
+ && defined(AO_HAVE_nop_full) && !defined(AO_HAVE_short_fetch_and_add_full)
+# define AO_short_fetch_and_add_full(addr, val) \
+ (AO_nop_full(), AO_short_fetch_and_add_acquire(addr, val))
+# define AO_HAVE_short_fetch_and_add_full
+#endif
+
+#if !defined(AO_HAVE_short_fetch_and_add_release_write) \
+ && defined(AO_HAVE_short_fetch_and_add_write)
+# define AO_short_fetch_and_add_release_write(addr, val) \
+ AO_short_fetch_and_add_write(addr, val)
+# define AO_HAVE_short_fetch_and_add_release_write
+#endif
+#if !defined(AO_HAVE_short_fetch_and_add_release_write) \
+ && defined(AO_HAVE_short_fetch_and_add_release)
+# define AO_short_fetch_and_add_release_write(addr, val) \
+ AO_short_fetch_and_add_release(addr, val)
+# define AO_HAVE_short_fetch_and_add_release_write
+#endif
+
+#if !defined(AO_HAVE_short_fetch_and_add_acquire_read) \
+ && defined(AO_HAVE_short_fetch_and_add_read)
+# define AO_short_fetch_and_add_acquire_read(addr, val) \
+ AO_short_fetch_and_add_read(addr, val)
+# define AO_HAVE_short_fetch_and_add_acquire_read
+#endif
+#if !defined(AO_HAVE_short_fetch_and_add_acquire_read) \
+ && defined(AO_HAVE_short_fetch_and_add_acquire)
+# define AO_short_fetch_and_add_acquire_read(addr, val) \
+ AO_short_fetch_and_add_acquire(addr, val)
+# define AO_HAVE_short_fetch_and_add_acquire_read
+#endif
+
+#ifdef AO_NO_DD_ORDERING
+# if defined(AO_HAVE_short_fetch_and_add_acquire_read)
+# define AO_short_fetch_and_add_dd_acquire_read(addr, val) \
+ AO_short_fetch_and_add_acquire_read(addr, val)
+# define AO_HAVE_short_fetch_and_add_dd_acquire_read
+# endif
+#else
+# if defined(AO_HAVE_short_fetch_and_add)
+# define AO_short_fetch_and_add_dd_acquire_read(addr, val) \
+ AO_short_fetch_and_add(addr, val)
+# define AO_HAVE_short_fetch_and_add_dd_acquire_read
+# endif
+#endif /* !AO_NO_DD_ORDERING */
+
+/* short_fetch_and_add1 */
+#if defined(AO_HAVE_short_fetch_and_add_full) \
+ && !defined(AO_HAVE_short_fetch_and_add1_full)
+# define AO_short_fetch_and_add1_full(addr) \
+ AO_short_fetch_and_add_full(addr, 1)
+# define AO_HAVE_short_fetch_and_add1_full
+#endif
+#if defined(AO_HAVE_short_fetch_and_add_release) \
+ && !defined(AO_HAVE_short_fetch_and_add1_release)
+# define AO_short_fetch_and_add1_release(addr) \
+ AO_short_fetch_and_add_release(addr, 1)
+# define AO_HAVE_short_fetch_and_add1_release
+#endif
+#if defined(AO_HAVE_short_fetch_and_add_acquire) \
+ && !defined(AO_HAVE_short_fetch_and_add1_acquire)
+# define AO_short_fetch_and_add1_acquire(addr) \
+ AO_short_fetch_and_add_acquire(addr, 1)
+# define AO_HAVE_short_fetch_and_add1_acquire
+#endif
+#if defined(AO_HAVE_short_fetch_and_add_write) \
+ && !defined(AO_HAVE_short_fetch_and_add1_write)
+# define AO_short_fetch_and_add1_write(addr) \
+ AO_short_fetch_and_add_write(addr, 1)
+# define AO_HAVE_short_fetch_and_add1_write
+#endif
+#if defined(AO_HAVE_short_fetch_and_add_read) \
+ && !defined(AO_HAVE_short_fetch_and_add1_read)
+# define AO_short_fetch_and_add1_read(addr) \
+ AO_short_fetch_and_add_read(addr, 1)
+# define AO_HAVE_short_fetch_and_add1_read
+#endif
+#if defined(AO_HAVE_short_fetch_and_add_release_write) \
+ && !defined(AO_HAVE_short_fetch_and_add1_release_write)
+# define AO_short_fetch_and_add1_release_write(addr) \
+ AO_short_fetch_and_add_release_write(addr, 1)
+# define AO_HAVE_short_fetch_and_add1_release_write
+#endif
+#if defined(AO_HAVE_short_fetch_and_add_acquire_read) \
+ && !defined(AO_HAVE_short_fetch_and_add1_acquire_read)
+# define AO_short_fetch_and_add1_acquire_read(addr) \
+ AO_short_fetch_and_add_acquire_read(addr, 1)
+# define AO_HAVE_short_fetch_and_add1_acquire_read
+#endif
+#if defined(AO_HAVE_short_fetch_and_add) \
+ && !defined(AO_HAVE_short_fetch_and_add1)
+# define AO_short_fetch_and_add1(addr) AO_short_fetch_and_add(addr, 1)
+# define AO_HAVE_short_fetch_and_add1
+#endif
+
+#if defined(AO_HAVE_short_fetch_and_add1_full)
+# if !defined(AO_HAVE_short_fetch_and_add1_release)
+# define AO_short_fetch_and_add1_release(addr) \
+ AO_short_fetch_and_add1_full(addr)
+# define AO_HAVE_short_fetch_and_add1_release
+# endif
+# if !defined(AO_HAVE_short_fetch_and_add1_acquire)
+# define AO_short_fetch_and_add1_acquire(addr) \
+ AO_short_fetch_and_add1_full(addr)
+# define AO_HAVE_short_fetch_and_add1_acquire
+# endif
+# if !defined(AO_HAVE_short_fetch_and_add1_write)
+# define AO_short_fetch_and_add1_write(addr) \
+ AO_short_fetch_and_add1_full(addr)
+# define AO_HAVE_short_fetch_and_add1_write
+# endif
+# if !defined(AO_HAVE_short_fetch_and_add1_read)
+# define AO_short_fetch_and_add1_read(addr) \
+ AO_short_fetch_and_add1_full(addr)
+# define AO_HAVE_short_fetch_and_add1_read
+# endif
+#endif /* AO_HAVE_short_fetch_and_add1_full */
+
+#if !defined(AO_HAVE_short_fetch_and_add1) \
+ && defined(AO_HAVE_short_fetch_and_add1_release)
+# define AO_short_fetch_and_add1(addr) AO_short_fetch_and_add1_release(addr)
+# define AO_HAVE_short_fetch_and_add1
+#endif
+#if !defined(AO_HAVE_short_fetch_and_add1) \
+ && defined(AO_HAVE_short_fetch_and_add1_acquire)
+# define AO_short_fetch_and_add1(addr) AO_short_fetch_and_add1_acquire(addr)
+# define AO_HAVE_short_fetch_and_add1
+#endif
+#if !defined(AO_HAVE_short_fetch_and_add1) \
+ && defined(AO_HAVE_short_fetch_and_add1_write)
+# define AO_short_fetch_and_add1(addr) AO_short_fetch_and_add1_write(addr)
+# define AO_HAVE_short_fetch_and_add1
+#endif
+#if !defined(AO_HAVE_short_fetch_and_add1) \
+ && defined(AO_HAVE_short_fetch_and_add1_read)
+# define AO_short_fetch_and_add1(addr) AO_short_fetch_and_add1_read(addr)
+# define AO_HAVE_short_fetch_and_add1
+#endif
+
+#if defined(AO_HAVE_short_fetch_and_add1_acquire) \
+ && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_short_fetch_and_add1_full)
+# define AO_short_fetch_and_add1_full(addr) \
+ (AO_nop_full(), AO_short_fetch_and_add1_acquire(addr))
+# define AO_HAVE_short_fetch_and_add1_full
+#endif
+
+#if !defined(AO_HAVE_short_fetch_and_add1_release_write) \
+ && defined(AO_HAVE_short_fetch_and_add1_write)
+# define AO_short_fetch_and_add1_release_write(addr) \
+ AO_short_fetch_and_add1_write(addr)
+# define AO_HAVE_short_fetch_and_add1_release_write
+#endif
+#if !defined(AO_HAVE_short_fetch_and_add1_release_write) \
+ && defined(AO_HAVE_short_fetch_and_add1_release)
+# define AO_short_fetch_and_add1_release_write(addr) \
+ AO_short_fetch_and_add1_release(addr)
+# define AO_HAVE_short_fetch_and_add1_release_write
+#endif
+#if !defined(AO_HAVE_short_fetch_and_add1_acquire_read) \
+ && defined(AO_HAVE_short_fetch_and_add1_read)
+# define AO_short_fetch_and_add1_acquire_read(addr) \
+ AO_short_fetch_and_add1_read(addr)
+# define AO_HAVE_short_fetch_and_add1_acquire_read
+#endif
+#if !defined(AO_HAVE_short_fetch_and_add1_acquire_read) \
+ && defined(AO_HAVE_short_fetch_and_add1_acquire)
+# define AO_short_fetch_and_add1_acquire_read(addr) \
+ AO_short_fetch_and_add1_acquire(addr)
+# define AO_HAVE_short_fetch_and_add1_acquire_read
+#endif
+
+#ifdef AO_NO_DD_ORDERING
+# if defined(AO_HAVE_short_fetch_and_add1_acquire_read)
+# define AO_short_fetch_and_add1_dd_acquire_read(addr) \
+ AO_short_fetch_and_add1_acquire_read(addr)
+# define AO_HAVE_short_fetch_and_add1_dd_acquire_read
+# endif
+#else
+# if defined(AO_HAVE_short_fetch_and_add1)
+# define AO_short_fetch_and_add1_dd_acquire_read(addr) \
+ AO_short_fetch_and_add1(addr)
+# define AO_HAVE_short_fetch_and_add1_dd_acquire_read
+# endif
+#endif /* !AO_NO_DD_ORDERING */
+
+/* short_fetch_and_sub1 */
+#if defined(AO_HAVE_short_fetch_and_add_full) \
+ && !defined(AO_HAVE_short_fetch_and_sub1_full)
+# define AO_short_fetch_and_sub1_full(addr) \
+ AO_short_fetch_and_add_full(addr, (unsigned/**/short)(-1))
+# define AO_HAVE_short_fetch_and_sub1_full
+#endif
+#if defined(AO_HAVE_short_fetch_and_add_release) \
+ && !defined(AO_HAVE_short_fetch_and_sub1_release)
+# define AO_short_fetch_and_sub1_release(addr) \
+ AO_short_fetch_and_add_release(addr, (unsigned/**/short)(-1))
+# define AO_HAVE_short_fetch_and_sub1_release
+#endif
+#if defined(AO_HAVE_short_fetch_and_add_acquire) \
+ && !defined(AO_HAVE_short_fetch_and_sub1_acquire)
+# define AO_short_fetch_and_sub1_acquire(addr) \
+ AO_short_fetch_and_add_acquire(addr, (unsigned/**/short)(-1))
+# define AO_HAVE_short_fetch_and_sub1_acquire
+#endif
+#if defined(AO_HAVE_short_fetch_and_add_write) \
+ && !defined(AO_HAVE_short_fetch_and_sub1_write)
+# define AO_short_fetch_and_sub1_write(addr) \
+ AO_short_fetch_and_add_write(addr, (unsigned/**/short)(-1))
+# define AO_HAVE_short_fetch_and_sub1_write
+#endif
+#if defined(AO_HAVE_short_fetch_and_add_read) \
+ && !defined(AO_HAVE_short_fetch_and_sub1_read)
+# define AO_short_fetch_and_sub1_read(addr) \
+ AO_short_fetch_and_add_read(addr, (unsigned/**/short)(-1))
+# define AO_HAVE_short_fetch_and_sub1_read
+#endif
+#if defined(AO_HAVE_short_fetch_and_add_release_write) \
+ && !defined(AO_HAVE_short_fetch_and_sub1_release_write)
+# define AO_short_fetch_and_sub1_release_write(addr) \
+ AO_short_fetch_and_add_release_write(addr, (unsigned/**/short)(-1))
+# define AO_HAVE_short_fetch_and_sub1_release_write
+#endif
+#if defined(AO_HAVE_short_fetch_and_add_acquire_read) \
+ && !defined(AO_HAVE_short_fetch_and_sub1_acquire_read)
+# define AO_short_fetch_and_sub1_acquire_read(addr) \
+ AO_short_fetch_and_add_acquire_read(addr, (unsigned/**/short)(-1))
+# define AO_HAVE_short_fetch_and_sub1_acquire_read
+#endif
+#if defined(AO_HAVE_short_fetch_and_add) \
+ && !defined(AO_HAVE_short_fetch_and_sub1)
+# define AO_short_fetch_and_sub1(addr) \
+ AO_short_fetch_and_add(addr, (unsigned/**/short)(-1))
+# define AO_HAVE_short_fetch_and_sub1
+#endif
+
+#if defined(AO_HAVE_short_fetch_and_sub1_full)
+# if !defined(AO_HAVE_short_fetch_and_sub1_release)
+# define AO_short_fetch_and_sub1_release(addr) \
+ AO_short_fetch_and_sub1_full(addr)
+# define AO_HAVE_short_fetch_and_sub1_release
+# endif
+# if !defined(AO_HAVE_short_fetch_and_sub1_acquire)
+# define AO_short_fetch_and_sub1_acquire(addr) \
+ AO_short_fetch_and_sub1_full(addr)
+# define AO_HAVE_short_fetch_and_sub1_acquire
+# endif
+# if !defined(AO_HAVE_short_fetch_and_sub1_write)
+# define AO_short_fetch_and_sub1_write(addr) \
+ AO_short_fetch_and_sub1_full(addr)
+# define AO_HAVE_short_fetch_and_sub1_write
+# endif
+# if !defined(AO_HAVE_short_fetch_and_sub1_read)
+# define AO_short_fetch_and_sub1_read(addr) \
+ AO_short_fetch_and_sub1_full(addr)
+# define AO_HAVE_short_fetch_and_sub1_read
+# endif
+#endif /* AO_HAVE_short_fetch_and_sub1_full */
+
+#if !defined(AO_HAVE_short_fetch_and_sub1) \
+ && defined(AO_HAVE_short_fetch_and_sub1_release)
+# define AO_short_fetch_and_sub1(addr) AO_short_fetch_and_sub1_release(addr)
+# define AO_HAVE_short_fetch_and_sub1
+#endif
+#if !defined(AO_HAVE_short_fetch_and_sub1) \
+ && defined(AO_HAVE_short_fetch_and_sub1_acquire)
+# define AO_short_fetch_and_sub1(addr) AO_short_fetch_and_sub1_acquire(addr)
+# define AO_HAVE_short_fetch_and_sub1
+#endif
+#if !defined(AO_HAVE_short_fetch_and_sub1) \
+ && defined(AO_HAVE_short_fetch_and_sub1_write)
+# define AO_short_fetch_and_sub1(addr) AO_short_fetch_and_sub1_write(addr)
+# define AO_HAVE_short_fetch_and_sub1
+#endif
+#if !defined(AO_HAVE_short_fetch_and_sub1) \
+ && defined(AO_HAVE_short_fetch_and_sub1_read)
+# define AO_short_fetch_and_sub1(addr) AO_short_fetch_and_sub1_read(addr)
+# define AO_HAVE_short_fetch_and_sub1
+#endif
+
+#if defined(AO_HAVE_short_fetch_and_sub1_acquire) \
+ && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_short_fetch_and_sub1_full)
+# define AO_short_fetch_and_sub1_full(addr) \
+ (AO_nop_full(), AO_short_fetch_and_sub1_acquire(addr))
+# define AO_HAVE_short_fetch_and_sub1_full
+#endif
+
+#if !defined(AO_HAVE_short_fetch_and_sub1_release_write) \
+ && defined(AO_HAVE_short_fetch_and_sub1_write)
+# define AO_short_fetch_and_sub1_release_write(addr) \
+ AO_short_fetch_and_sub1_write(addr)
+# define AO_HAVE_short_fetch_and_sub1_release_write
+#endif
+#if !defined(AO_HAVE_short_fetch_and_sub1_release_write) \
+ && defined(AO_HAVE_short_fetch_and_sub1_release)
+# define AO_short_fetch_and_sub1_release_write(addr) \
+ AO_short_fetch_and_sub1_release(addr)
+# define AO_HAVE_short_fetch_and_sub1_release_write
+#endif
+#if !defined(AO_HAVE_short_fetch_and_sub1_acquire_read) \
+ && defined(AO_HAVE_short_fetch_and_sub1_read)
+# define AO_short_fetch_and_sub1_acquire_read(addr) \
+ AO_short_fetch_and_sub1_read(addr)
+# define AO_HAVE_short_fetch_and_sub1_acquire_read
+#endif
+#if !defined(AO_HAVE_short_fetch_and_sub1_acquire_read) \
+ && defined(AO_HAVE_short_fetch_and_sub1_acquire)
+# define AO_short_fetch_and_sub1_acquire_read(addr) \
+ AO_short_fetch_and_sub1_acquire(addr)
+# define AO_HAVE_short_fetch_and_sub1_acquire_read
+#endif
+
+#ifdef AO_NO_DD_ORDERING
+# if defined(AO_HAVE_short_fetch_and_sub1_acquire_read)
+# define AO_short_fetch_and_sub1_dd_acquire_read(addr) \
+ AO_short_fetch_and_sub1_acquire_read(addr)
+# define AO_HAVE_short_fetch_and_sub1_dd_acquire_read
+# endif
+#else
+# if defined(AO_HAVE_short_fetch_and_sub1)
+# define AO_short_fetch_and_sub1_dd_acquire_read(addr) \
+ AO_short_fetch_and_sub1(addr)
+# define AO_HAVE_short_fetch_and_sub1_dd_acquire_read
+# endif
+#endif /* !AO_NO_DD_ORDERING */
+
+/* short_and */
+#if defined(AO_HAVE_short_compare_and_swap_full) \
+ && !defined(AO_HAVE_short_and_full)
+ /* Atomically AND "value" into *addr; the full-barrier compare-and-swap */
+ /* retry loop supplies the _full memory ordering. */
+ /* NOTE(review): the re-read cast drops the volatile qualifier from */
+ /* addr; presumably intentional in the generator -- confirm. */
+ AO_INLINE void
+ AO_short_and_full(volatile unsigned/**/short *addr, unsigned/**/short value)
+ {
+ unsigned/**/short old;
+
+ do
+ {
+ old = *(unsigned/**/short *)addr;
+ }
+ while (AO_EXPECT_FALSE(!AO_short_compare_and_swap_full(addr, old,
+ old & value)));
+ }
+# define AO_HAVE_short_and_full
+#endif
+
+#if defined(AO_HAVE_short_and_full)
+# if !defined(AO_HAVE_short_and_release)
+# define AO_short_and_release(addr, val) AO_short_and_full(addr, val)
+# define AO_HAVE_short_and_release
+# endif
+# if !defined(AO_HAVE_short_and_acquire)
+# define AO_short_and_acquire(addr, val) AO_short_and_full(addr, val)
+# define AO_HAVE_short_and_acquire
+# endif
+# if !defined(AO_HAVE_short_and_write)
+# define AO_short_and_write(addr, val) AO_short_and_full(addr, val)
+# define AO_HAVE_short_and_write
+# endif
+# if !defined(AO_HAVE_short_and_read)
+# define AO_short_and_read(addr, val) AO_short_and_full(addr, val)
+# define AO_HAVE_short_and_read
+# endif
+#endif /* AO_HAVE_short_and_full */
+
+#if !defined(AO_HAVE_short_and) && defined(AO_HAVE_short_and_release)
+# define AO_short_and(addr, val) AO_short_and_release(addr, val)
+# define AO_HAVE_short_and
+#endif
+#if !defined(AO_HAVE_short_and) && defined(AO_HAVE_short_and_acquire)
+# define AO_short_and(addr, val) AO_short_and_acquire(addr, val)
+# define AO_HAVE_short_and
+#endif
+#if !defined(AO_HAVE_short_and) && defined(AO_HAVE_short_and_write)
+# define AO_short_and(addr, val) AO_short_and_write(addr, val)
+# define AO_HAVE_short_and
+#endif
+#if !defined(AO_HAVE_short_and) && defined(AO_HAVE_short_and_read)
+# define AO_short_and(addr, val) AO_short_and_read(addr, val)
+# define AO_HAVE_short_and
+#endif
+
+#if defined(AO_HAVE_short_and_acquire) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_short_and_full)
+# define AO_short_and_full(addr, val) \
+ (AO_nop_full(), AO_short_and_acquire(addr, val))
+# define AO_HAVE_short_and_full
+#endif
+
+#if !defined(AO_HAVE_short_and_release_write) \
+ && defined(AO_HAVE_short_and_write)
+# define AO_short_and_release_write(addr, val) AO_short_and_write(addr, val)
+# define AO_HAVE_short_and_release_write
+#endif
+#if !defined(AO_HAVE_short_and_release_write) \
+ && defined(AO_HAVE_short_and_release)
+# define AO_short_and_release_write(addr, val) AO_short_and_release(addr, val)
+# define AO_HAVE_short_and_release_write
+#endif
+#if !defined(AO_HAVE_short_and_acquire_read) \
+ && defined(AO_HAVE_short_and_read)
+# define AO_short_and_acquire_read(addr, val) AO_short_and_read(addr, val)
+# define AO_HAVE_short_and_acquire_read
+#endif
+#if !defined(AO_HAVE_short_and_acquire_read) \
+ && defined(AO_HAVE_short_and_acquire)
+# define AO_short_and_acquire_read(addr, val) AO_short_and_acquire(addr, val)
+# define AO_HAVE_short_and_acquire_read
+#endif
+
+/* short_or */
+#if defined(AO_HAVE_short_compare_and_swap_full) \
+ && !defined(AO_HAVE_short_or_full)
+ /* Atomically OR "value" into *addr; the full-barrier compare-and-swap */
+ /* retry loop supplies the _full memory ordering. */
+ AO_INLINE void
+ AO_short_or_full(volatile unsigned/**/short *addr, unsigned/**/short value)
+ {
+ unsigned/**/short old;
+
+ do
+ {
+ old = *(unsigned/**/short *)addr;
+ }
+ while (AO_EXPECT_FALSE(!AO_short_compare_and_swap_full(addr, old,
+ old | value)));
+ }
+# define AO_HAVE_short_or_full
+#endif
+
+#if defined(AO_HAVE_short_or_full)
+# if !defined(AO_HAVE_short_or_release)
+# define AO_short_or_release(addr, val) AO_short_or_full(addr, val)
+# define AO_HAVE_short_or_release
+# endif
+# if !defined(AO_HAVE_short_or_acquire)
+# define AO_short_or_acquire(addr, val) AO_short_or_full(addr, val)
+# define AO_HAVE_short_or_acquire
+# endif
+# if !defined(AO_HAVE_short_or_write)
+# define AO_short_or_write(addr, val) AO_short_or_full(addr, val)
+# define AO_HAVE_short_or_write
+# endif
+# if !defined(AO_HAVE_short_or_read)
+# define AO_short_or_read(addr, val) AO_short_or_full(addr, val)
+# define AO_HAVE_short_or_read
+# endif
+#endif /* AO_HAVE_short_or_full */
+
+#if !defined(AO_HAVE_short_or) && defined(AO_HAVE_short_or_release)
+# define AO_short_or(addr, val) AO_short_or_release(addr, val)
+# define AO_HAVE_short_or
+#endif
+#if !defined(AO_HAVE_short_or) && defined(AO_HAVE_short_or_acquire)
+# define AO_short_or(addr, val) AO_short_or_acquire(addr, val)
+# define AO_HAVE_short_or
+#endif
+#if !defined(AO_HAVE_short_or) && defined(AO_HAVE_short_or_write)
+# define AO_short_or(addr, val) AO_short_or_write(addr, val)
+# define AO_HAVE_short_or
+#endif
+#if !defined(AO_HAVE_short_or) && defined(AO_HAVE_short_or_read)
+# define AO_short_or(addr, val) AO_short_or_read(addr, val)
+# define AO_HAVE_short_or
+#endif
+
+#if defined(AO_HAVE_short_or_acquire) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_short_or_full)
+# define AO_short_or_full(addr, val) \
+ (AO_nop_full(), AO_short_or_acquire(addr, val))
+# define AO_HAVE_short_or_full
+#endif
+
+#if !defined(AO_HAVE_short_or_release_write) \
+ && defined(AO_HAVE_short_or_write)
+# define AO_short_or_release_write(addr, val) AO_short_or_write(addr, val)
+# define AO_HAVE_short_or_release_write
+#endif
+#if !defined(AO_HAVE_short_or_release_write) \
+ && defined(AO_HAVE_short_or_release)
+# define AO_short_or_release_write(addr, val) AO_short_or_release(addr, val)
+# define AO_HAVE_short_or_release_write
+#endif
+#if !defined(AO_HAVE_short_or_acquire_read) && defined(AO_HAVE_short_or_read)
+# define AO_short_or_acquire_read(addr, val) AO_short_or_read(addr, val)
+# define AO_HAVE_short_or_acquire_read
+#endif
+#if !defined(AO_HAVE_short_or_acquire_read) \
+ && defined(AO_HAVE_short_or_acquire)
+# define AO_short_or_acquire_read(addr, val) AO_short_or_acquire(addr, val)
+# define AO_HAVE_short_or_acquire_read
+#endif
+
+/* short_xor */
+#if defined(AO_HAVE_short_compare_and_swap_full) \
+ && !defined(AO_HAVE_short_xor_full)
+ /* Atomically XOR "value" into *addr; the full-barrier compare-and-swap */
+ /* retry loop supplies the _full memory ordering. */
+ AO_INLINE void
+ AO_short_xor_full(volatile unsigned/**/short *addr, unsigned/**/short value)
+ {
+ unsigned/**/short old;
+
+ do
+ {
+ old = *(unsigned/**/short *)addr;
+ }
+ while (AO_EXPECT_FALSE(!AO_short_compare_and_swap_full(addr, old,
+ old ^ value)));
+ }
+# define AO_HAVE_short_xor_full
+#endif
+
+#if defined(AO_HAVE_short_xor_full)
+# if !defined(AO_HAVE_short_xor_release)
+# define AO_short_xor_release(addr, val) AO_short_xor_full(addr, val)
+# define AO_HAVE_short_xor_release
+# endif
+# if !defined(AO_HAVE_short_xor_acquire)
+# define AO_short_xor_acquire(addr, val) AO_short_xor_full(addr, val)
+# define AO_HAVE_short_xor_acquire
+# endif
+# if !defined(AO_HAVE_short_xor_write)
+# define AO_short_xor_write(addr, val) AO_short_xor_full(addr, val)
+# define AO_HAVE_short_xor_write
+# endif
+# if !defined(AO_HAVE_short_xor_read)
+# define AO_short_xor_read(addr, val) AO_short_xor_full(addr, val)
+# define AO_HAVE_short_xor_read
+# endif
+#endif /* AO_HAVE_short_xor_full */
+
+#if !defined(AO_HAVE_short_xor) && defined(AO_HAVE_short_xor_release)
+# define AO_short_xor(addr, val) AO_short_xor_release(addr, val)
+# define AO_HAVE_short_xor
+#endif
+#if !defined(AO_HAVE_short_xor) && defined(AO_HAVE_short_xor_acquire)
+# define AO_short_xor(addr, val) AO_short_xor_acquire(addr, val)
+# define AO_HAVE_short_xor
+#endif
+#if !defined(AO_HAVE_short_xor) && defined(AO_HAVE_short_xor_write)
+# define AO_short_xor(addr, val) AO_short_xor_write(addr, val)
+# define AO_HAVE_short_xor
+#endif
+#if !defined(AO_HAVE_short_xor) && defined(AO_HAVE_short_xor_read)
+# define AO_short_xor(addr, val) AO_short_xor_read(addr, val)
+# define AO_HAVE_short_xor
+#endif
+
+#if defined(AO_HAVE_short_xor_acquire) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_short_xor_full)
+# define AO_short_xor_full(addr, val) \
+ (AO_nop_full(), AO_short_xor_acquire(addr, val))
+# define AO_HAVE_short_xor_full
+#endif
+
+#if !defined(AO_HAVE_short_xor_release_write) \
+ && defined(AO_HAVE_short_xor_write)
+# define AO_short_xor_release_write(addr, val) AO_short_xor_write(addr, val)
+# define AO_HAVE_short_xor_release_write
+#endif
+#if !defined(AO_HAVE_short_xor_release_write) \
+ && defined(AO_HAVE_short_xor_release)
+# define AO_short_xor_release_write(addr, val) AO_short_xor_release(addr, val)
+# define AO_HAVE_short_xor_release_write
+#endif
+#if !defined(AO_HAVE_short_xor_acquire_read) \
+ && defined(AO_HAVE_short_xor_read)
+# define AO_short_xor_acquire_read(addr, val) AO_short_xor_read(addr, val)
+# define AO_HAVE_short_xor_acquire_read
+#endif
+#if !defined(AO_HAVE_short_xor_acquire_read) \
+ && defined(AO_HAVE_short_xor_acquire)
+# define AO_short_xor_acquire_read(addr, val) AO_short_xor_acquire(addr, val)
+# define AO_HAVE_short_xor_acquire_read
+#endif
+
+/* short_and/or/xor_dd_acquire_read are meaningless. */
+/*
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* int_compare_and_swap (based on fetch_compare_and_swap) */
+#if defined(AO_HAVE_int_fetch_compare_and_swap_full) \
+ && !defined(AO_HAVE_int_compare_and_swap_full)
+ /* Boolean CAS, _full ordering: succeeds (returns nonzero) iff the */
+ /* underlying fetch-based CAS returned the expected old value. */
+ AO_INLINE int
+ AO_int_compare_and_swap_full(volatile unsigned *addr, unsigned old_val,
+ unsigned new_val)
+ {
+ return AO_int_fetch_compare_and_swap_full(addr, old_val, new_val)
+ == old_val;
+ }
+# define AO_HAVE_int_compare_and_swap_full
+#endif
+
+#if defined(AO_HAVE_int_fetch_compare_and_swap_acquire) \
+ && !defined(AO_HAVE_int_compare_and_swap_acquire)
+ /* Boolean CAS, _acquire ordering: wraps the fetch-based CAS and */
+ /* compares its result against the expected old value. */
+ AO_INLINE int
+ AO_int_compare_and_swap_acquire(volatile unsigned *addr, unsigned old_val,
+ unsigned new_val)
+ {
+ return AO_int_fetch_compare_and_swap_acquire(addr, old_val, new_val)
+ == old_val;
+ }
+# define AO_HAVE_int_compare_and_swap_acquire
+#endif
+
+#if defined(AO_HAVE_int_fetch_compare_and_swap_release) \
+ && !defined(AO_HAVE_int_compare_and_swap_release)
+ /* Boolean CAS, _release ordering: wraps the fetch-based CAS and */
+ /* compares its result against the expected old value. */
+ AO_INLINE int
+ AO_int_compare_and_swap_release(volatile unsigned *addr, unsigned old_val,
+ unsigned new_val)
+ {
+ return AO_int_fetch_compare_and_swap_release(addr, old_val, new_val)
+ == old_val;
+ }
+# define AO_HAVE_int_compare_and_swap_release
+#endif
+
+#if defined(AO_HAVE_int_fetch_compare_and_swap_write) \
+ && !defined(AO_HAVE_int_compare_and_swap_write)
+ /* Boolean CAS, _write ordering: wraps the fetch-based CAS and */
+ /* compares its result against the expected old value. */
+ AO_INLINE int
+ AO_int_compare_and_swap_write(volatile unsigned *addr, unsigned old_val,
+ unsigned new_val)
+ {
+ return AO_int_fetch_compare_and_swap_write(addr, old_val, new_val)
+ == old_val;
+ }
+# define AO_HAVE_int_compare_and_swap_write
+#endif
+
+#if defined(AO_HAVE_int_fetch_compare_and_swap_read) \
+ && !defined(AO_HAVE_int_compare_and_swap_read)
+ /* Boolean CAS, _read ordering: wraps the fetch-based CAS and */
+ /* compares its result against the expected old value. */
+ AO_INLINE int
+ AO_int_compare_and_swap_read(volatile unsigned *addr, unsigned old_val,
+ unsigned new_val)
+ {
+ return AO_int_fetch_compare_and_swap_read(addr, old_val, new_val)
+ == old_val;
+ }
+# define AO_HAVE_int_compare_and_swap_read
+#endif
+
+#if defined(AO_HAVE_int_fetch_compare_and_swap) \
+ && !defined(AO_HAVE_int_compare_and_swap)
+ /* Boolean CAS, unordered variant: wraps the fetch-based CAS and */
+ /* compares its result against the expected old value. */
+ AO_INLINE int
+ AO_int_compare_and_swap(volatile unsigned *addr, unsigned old_val,
+ unsigned new_val)
+ {
+ return AO_int_fetch_compare_and_swap(addr, old_val, new_val) == old_val;
+ }
+# define AO_HAVE_int_compare_and_swap
+#endif
+
+#if defined(AO_HAVE_int_fetch_compare_and_swap_release_write) \
+ && !defined(AO_HAVE_int_compare_and_swap_release_write)
+ /* Boolean CAS, _release_write ordering: wraps the fetch-based CAS */
+ /* and compares its result against the expected old value. */
+ AO_INLINE int
+ AO_int_compare_and_swap_release_write(volatile unsigned *addr,
+ unsigned old_val, unsigned new_val)
+ {
+ return AO_int_fetch_compare_and_swap_release_write(addr, old_val,
+ new_val) == old_val;
+ }
+# define AO_HAVE_int_compare_and_swap_release_write
+#endif
+
+#if defined(AO_HAVE_int_fetch_compare_and_swap_acquire_read) \
+ && !defined(AO_HAVE_int_compare_and_swap_acquire_read)
+ /* Boolean CAS, _acquire_read ordering: wraps the fetch-based CAS */
+ /* and compares its result against the expected old value. */
+ AO_INLINE int
+ AO_int_compare_and_swap_acquire_read(volatile unsigned *addr,
+ unsigned old_val, unsigned new_val)
+ {
+ return AO_int_fetch_compare_and_swap_acquire_read(addr, old_val,
+ new_val) == old_val;
+ }
+# define AO_HAVE_int_compare_and_swap_acquire_read
+#endif
+
+#if defined(AO_HAVE_int_fetch_compare_and_swap_dd_acquire_read) \
+ && !defined(AO_HAVE_int_compare_and_swap_dd_acquire_read)
+ /* Boolean CAS, _dd_acquire_read ordering: wraps the fetch-based CAS */
+ /* and compares its result against the expected old value. */
+ AO_INLINE int
+ AO_int_compare_and_swap_dd_acquire_read(volatile unsigned *addr,
+ unsigned old_val, unsigned new_val)
+ {
+ return AO_int_fetch_compare_and_swap_dd_acquire_read(addr, old_val,
+ new_val) == old_val;
+ }
+# define AO_HAVE_int_compare_and_swap_dd_acquire_read
+#endif
+
+/* int_fetch_and_add */
+/* We first try to implement fetch_and_add variants in terms of the */
+/* corresponding compare_and_swap variants to minimize adding barriers. */
+#if defined(AO_HAVE_int_compare_and_swap_full) \
+ && !defined(AO_HAVE_int_fetch_and_add_full)
+ /* Add "incr" to *addr via a full-barrier compare-and-swap retry loop; */
+ /* returns the value observed at *addr before the addition. */
+ AO_INLINE unsigned
+ AO_int_fetch_and_add_full(volatile unsigned *addr, unsigned incr)
+ {
+ unsigned old;
+
+ do
+ {
+ old = *(unsigned *)addr;
+ }
+ while (AO_EXPECT_FALSE(!AO_int_compare_and_swap_full(addr, old,
+ old + incr)));
+ return old;
+ }
+# define AO_HAVE_int_fetch_and_add_full
+#endif
+
+#if defined(AO_HAVE_int_compare_and_swap_acquire) \
+ && !defined(AO_HAVE_int_fetch_and_add_acquire)
+ /* Add "incr" to *addr via an acquire compare-and-swap retry loop; */
+ /* returns the value observed at *addr before the addition. */
+ AO_INLINE unsigned
+ AO_int_fetch_and_add_acquire(volatile unsigned *addr, unsigned incr)
+ {
+ unsigned old;
+
+ do
+ {
+ old = *(unsigned *)addr;
+ }
+ while (AO_EXPECT_FALSE(!AO_int_compare_and_swap_acquire(addr, old,
+ old + incr)));
+ return old;
+ }
+# define AO_HAVE_int_fetch_and_add_acquire
+#endif
+
+#if defined(AO_HAVE_int_compare_and_swap_release) \
+ && !defined(AO_HAVE_int_fetch_and_add_release)
+ /* Add "incr" to *addr via a release compare-and-swap retry loop; */
+ /* returns the value observed at *addr before the addition. */
+ AO_INLINE unsigned
+ AO_int_fetch_and_add_release(volatile unsigned *addr, unsigned incr)
+ {
+ unsigned old;
+
+ do
+ {
+ old = *(unsigned *)addr;
+ }
+ while (AO_EXPECT_FALSE(!AO_int_compare_and_swap_release(addr, old,
+ old + incr)));
+ return old;
+ }
+# define AO_HAVE_int_fetch_and_add_release
+#endif
+
+#if defined(AO_HAVE_int_compare_and_swap) \
+ && !defined(AO_HAVE_int_fetch_and_add)
+ /* Add "incr" to *addr via an unordered compare-and-swap retry loop; */
+ /* returns the value observed at *addr before the addition. */
+ AO_INLINE unsigned
+ AO_int_fetch_and_add(volatile unsigned *addr, unsigned incr)
+ {
+ unsigned old;
+
+ do
+ {
+ old = *(unsigned *)addr;
+ }
+ while (AO_EXPECT_FALSE(!AO_int_compare_and_swap(addr, old,
+ old + incr)));
+ return old;
+ }
+# define AO_HAVE_int_fetch_and_add
+#endif
+
+#if defined(AO_HAVE_int_fetch_and_add_full)
+# if !defined(AO_HAVE_int_fetch_and_add_release)
+# define AO_int_fetch_and_add_release(addr, val) \
+ AO_int_fetch_and_add_full(addr, val)
+# define AO_HAVE_int_fetch_and_add_release
+# endif
+# if !defined(AO_HAVE_int_fetch_and_add_acquire)
+# define AO_int_fetch_and_add_acquire(addr, val) \
+ AO_int_fetch_and_add_full(addr, val)
+# define AO_HAVE_int_fetch_and_add_acquire
+# endif
+# if !defined(AO_HAVE_int_fetch_and_add_write)
+# define AO_int_fetch_and_add_write(addr, val) \
+ AO_int_fetch_and_add_full(addr, val)
+# define AO_HAVE_int_fetch_and_add_write
+# endif
+# if !defined(AO_HAVE_int_fetch_and_add_read)
+# define AO_int_fetch_and_add_read(addr, val) \
+ AO_int_fetch_and_add_full(addr, val)
+# define AO_HAVE_int_fetch_and_add_read
+# endif
+#endif /* AO_HAVE_int_fetch_and_add_full */
+
+#if defined(AO_HAVE_int_fetch_and_add) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_int_fetch_and_add_acquire)
+ /* Derive the acquire variant by issuing a full barrier immediately */
+ /* after the unordered fetch-and-add. */
+ AO_INLINE unsigned
+ AO_int_fetch_and_add_acquire(volatile unsigned *addr, unsigned incr)
+ {
+ unsigned result = AO_int_fetch_and_add(addr, incr);
+ AO_nop_full();
+ return result;
+ }
+# define AO_HAVE_int_fetch_and_add_acquire
+#endif
+#if defined(AO_HAVE_int_fetch_and_add) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_int_fetch_and_add_release)
+# define AO_int_fetch_and_add_release(addr, incr) \
+ (AO_nop_full(), AO_int_fetch_and_add(addr, incr))
+# define AO_HAVE_int_fetch_and_add_release
+#endif
+
+#if !defined(AO_HAVE_int_fetch_and_add) \
+ && defined(AO_HAVE_int_fetch_and_add_release)
+# define AO_int_fetch_and_add(addr, val) \
+ AO_int_fetch_and_add_release(addr, val)
+# define AO_HAVE_int_fetch_and_add
+#endif
+#if !defined(AO_HAVE_int_fetch_and_add) \
+ && defined(AO_HAVE_int_fetch_and_add_acquire)
+# define AO_int_fetch_and_add(addr, val) \
+ AO_int_fetch_and_add_acquire(addr, val)
+# define AO_HAVE_int_fetch_and_add
+#endif
+#if !defined(AO_HAVE_int_fetch_and_add) \
+ && defined(AO_HAVE_int_fetch_and_add_write)
+# define AO_int_fetch_and_add(addr, val) \
+ AO_int_fetch_and_add_write(addr, val)
+# define AO_HAVE_int_fetch_and_add
+#endif
+#if !defined(AO_HAVE_int_fetch_and_add) \
+ && defined(AO_HAVE_int_fetch_and_add_read)
+# define AO_int_fetch_and_add(addr, val) \
+ AO_int_fetch_and_add_read(addr, val)
+# define AO_HAVE_int_fetch_and_add
+#endif
+
+#if defined(AO_HAVE_int_fetch_and_add_acquire) \
+ && defined(AO_HAVE_nop_full) && !defined(AO_HAVE_int_fetch_and_add_full)
+# define AO_int_fetch_and_add_full(addr, val) \
+ (AO_nop_full(), AO_int_fetch_and_add_acquire(addr, val))
+# define AO_HAVE_int_fetch_and_add_full
+#endif
+
+#if !defined(AO_HAVE_int_fetch_and_add_release_write) \
+ && defined(AO_HAVE_int_fetch_and_add_write)
+# define AO_int_fetch_and_add_release_write(addr, val) \
+ AO_int_fetch_and_add_write(addr, val)
+# define AO_HAVE_int_fetch_and_add_release_write
+#endif
+#if !defined(AO_HAVE_int_fetch_and_add_release_write) \
+ && defined(AO_HAVE_int_fetch_and_add_release)
+# define AO_int_fetch_and_add_release_write(addr, val) \
+ AO_int_fetch_and_add_release(addr, val)
+# define AO_HAVE_int_fetch_and_add_release_write
+#endif
+
+#if !defined(AO_HAVE_int_fetch_and_add_acquire_read) \
+ && defined(AO_HAVE_int_fetch_and_add_read)
+# define AO_int_fetch_and_add_acquire_read(addr, val) \
+ AO_int_fetch_and_add_read(addr, val)
+# define AO_HAVE_int_fetch_and_add_acquire_read
+#endif
+#if !defined(AO_HAVE_int_fetch_and_add_acquire_read) \
+ && defined(AO_HAVE_int_fetch_and_add_acquire)
+# define AO_int_fetch_and_add_acquire_read(addr, val) \
+ AO_int_fetch_and_add_acquire(addr, val)
+# define AO_HAVE_int_fetch_and_add_acquire_read
+#endif
+
+#ifdef AO_NO_DD_ORDERING
+# if defined(AO_HAVE_int_fetch_and_add_acquire_read)
+# define AO_int_fetch_and_add_dd_acquire_read(addr, val) \
+ AO_int_fetch_and_add_acquire_read(addr, val)
+# define AO_HAVE_int_fetch_and_add_dd_acquire_read
+# endif
+#else
+# if defined(AO_HAVE_int_fetch_and_add)
+# define AO_int_fetch_and_add_dd_acquire_read(addr, val) \
+ AO_int_fetch_and_add(addr, val)
+# define AO_HAVE_int_fetch_and_add_dd_acquire_read
+# endif
+#endif /* !AO_NO_DD_ORDERING */
+
+/* int_fetch_and_add1 */
+#if defined(AO_HAVE_int_fetch_and_add_full) \
+ && !defined(AO_HAVE_int_fetch_and_add1_full)
+# define AO_int_fetch_and_add1_full(addr) \
+ AO_int_fetch_and_add_full(addr, 1)
+# define AO_HAVE_int_fetch_and_add1_full
+#endif
+#if defined(AO_HAVE_int_fetch_and_add_release) \
+ && !defined(AO_HAVE_int_fetch_and_add1_release)
+# define AO_int_fetch_and_add1_release(addr) \
+ AO_int_fetch_and_add_release(addr, 1)
+# define AO_HAVE_int_fetch_and_add1_release
+#endif
+#if defined(AO_HAVE_int_fetch_and_add_acquire) \
+ && !defined(AO_HAVE_int_fetch_and_add1_acquire)
+# define AO_int_fetch_and_add1_acquire(addr) \
+ AO_int_fetch_and_add_acquire(addr, 1)
+# define AO_HAVE_int_fetch_and_add1_acquire
+#endif
+#if defined(AO_HAVE_int_fetch_and_add_write) \
+ && !defined(AO_HAVE_int_fetch_and_add1_write)
+# define AO_int_fetch_and_add1_write(addr) \
+ AO_int_fetch_and_add_write(addr, 1)
+# define AO_HAVE_int_fetch_and_add1_write
+#endif
+#if defined(AO_HAVE_int_fetch_and_add_read) \
+ && !defined(AO_HAVE_int_fetch_and_add1_read)
+# define AO_int_fetch_and_add1_read(addr) \
+ AO_int_fetch_and_add_read(addr, 1)
+# define AO_HAVE_int_fetch_and_add1_read
+#endif
+#if defined(AO_HAVE_int_fetch_and_add_release_write) \
+ && !defined(AO_HAVE_int_fetch_and_add1_release_write)
+# define AO_int_fetch_and_add1_release_write(addr) \
+ AO_int_fetch_and_add_release_write(addr, 1)
+# define AO_HAVE_int_fetch_and_add1_release_write
+#endif
+#if defined(AO_HAVE_int_fetch_and_add_acquire_read) \
+ && !defined(AO_HAVE_int_fetch_and_add1_acquire_read)
+# define AO_int_fetch_and_add1_acquire_read(addr) \
+ AO_int_fetch_and_add_acquire_read(addr, 1)
+# define AO_HAVE_int_fetch_and_add1_acquire_read
+#endif
+#if defined(AO_HAVE_int_fetch_and_add) \
+ && !defined(AO_HAVE_int_fetch_and_add1)
+# define AO_int_fetch_and_add1(addr) AO_int_fetch_and_add(addr, 1)
+# define AO_HAVE_int_fetch_and_add1
+#endif
+
+#if defined(AO_HAVE_int_fetch_and_add1_full)
+# if !defined(AO_HAVE_int_fetch_and_add1_release)
+# define AO_int_fetch_and_add1_release(addr) \
+ AO_int_fetch_and_add1_full(addr)
+# define AO_HAVE_int_fetch_and_add1_release
+# endif
+# if !defined(AO_HAVE_int_fetch_and_add1_acquire)
+# define AO_int_fetch_and_add1_acquire(addr) \
+ AO_int_fetch_and_add1_full(addr)
+# define AO_HAVE_int_fetch_and_add1_acquire
+# endif
+# if !defined(AO_HAVE_int_fetch_and_add1_write)
+# define AO_int_fetch_and_add1_write(addr) \
+ AO_int_fetch_and_add1_full(addr)
+# define AO_HAVE_int_fetch_and_add1_write
+# endif
+# if !defined(AO_HAVE_int_fetch_and_add1_read)
+# define AO_int_fetch_and_add1_read(addr) \
+ AO_int_fetch_and_add1_full(addr)
+# define AO_HAVE_int_fetch_and_add1_read
+# endif
+#endif /* AO_HAVE_int_fetch_and_add1_full */
+
+#if !defined(AO_HAVE_int_fetch_and_add1) \
+ && defined(AO_HAVE_int_fetch_and_add1_release)
+# define AO_int_fetch_and_add1(addr) AO_int_fetch_and_add1_release(addr)
+# define AO_HAVE_int_fetch_and_add1
+#endif
+#if !defined(AO_HAVE_int_fetch_and_add1) \
+ && defined(AO_HAVE_int_fetch_and_add1_acquire)
+# define AO_int_fetch_and_add1(addr) AO_int_fetch_and_add1_acquire(addr)
+# define AO_HAVE_int_fetch_and_add1
+#endif
+#if !defined(AO_HAVE_int_fetch_and_add1) \
+ && defined(AO_HAVE_int_fetch_and_add1_write)
+# define AO_int_fetch_and_add1(addr) AO_int_fetch_and_add1_write(addr)
+# define AO_HAVE_int_fetch_and_add1
+#endif
+#if !defined(AO_HAVE_int_fetch_and_add1) \
+ && defined(AO_HAVE_int_fetch_and_add1_read)
+# define AO_int_fetch_and_add1(addr) AO_int_fetch_and_add1_read(addr)
+# define AO_HAVE_int_fetch_and_add1
+#endif
+
+#if defined(AO_HAVE_int_fetch_and_add1_acquire) \
+ && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_int_fetch_and_add1_full)
+# define AO_int_fetch_and_add1_full(addr) \
+ (AO_nop_full(), AO_int_fetch_and_add1_acquire(addr))
+# define AO_HAVE_int_fetch_and_add1_full
+#endif
+
+#if !defined(AO_HAVE_int_fetch_and_add1_release_write) \
+ && defined(AO_HAVE_int_fetch_and_add1_write)
+# define AO_int_fetch_and_add1_release_write(addr) \
+ AO_int_fetch_and_add1_write(addr)
+# define AO_HAVE_int_fetch_and_add1_release_write
+#endif
+#if !defined(AO_HAVE_int_fetch_and_add1_release_write) \
+ && defined(AO_HAVE_int_fetch_and_add1_release)
+# define AO_int_fetch_and_add1_release_write(addr) \
+ AO_int_fetch_and_add1_release(addr)
+# define AO_HAVE_int_fetch_and_add1_release_write
+#endif
+#if !defined(AO_HAVE_int_fetch_and_add1_acquire_read) \
+ && defined(AO_HAVE_int_fetch_and_add1_read)
+# define AO_int_fetch_and_add1_acquire_read(addr) \
+ AO_int_fetch_and_add1_read(addr)
+# define AO_HAVE_int_fetch_and_add1_acquire_read
+#endif
+#if !defined(AO_HAVE_int_fetch_and_add1_acquire_read) \
+ && defined(AO_HAVE_int_fetch_and_add1_acquire)
+# define AO_int_fetch_and_add1_acquire_read(addr) \
+ AO_int_fetch_and_add1_acquire(addr)
+# define AO_HAVE_int_fetch_and_add1_acquire_read
+#endif
+
+#ifdef AO_NO_DD_ORDERING
+# if defined(AO_HAVE_int_fetch_and_add1_acquire_read)
+# define AO_int_fetch_and_add1_dd_acquire_read(addr) \
+ AO_int_fetch_and_add1_acquire_read(addr)
+# define AO_HAVE_int_fetch_and_add1_dd_acquire_read
+# endif
+#else
+# if defined(AO_HAVE_int_fetch_and_add1)
+# define AO_int_fetch_and_add1_dd_acquire_read(addr) \
+ AO_int_fetch_and_add1(addr)
+# define AO_HAVE_int_fetch_and_add1_dd_acquire_read
+# endif
+#endif /* !AO_NO_DD_ORDERING */
+
+/* int_fetch_and_sub1 */
+#if defined(AO_HAVE_int_fetch_and_add_full) \
+ && !defined(AO_HAVE_int_fetch_and_sub1_full)
+# define AO_int_fetch_and_sub1_full(addr) \
+ AO_int_fetch_and_add_full(addr, (unsigned)(-1))
+# define AO_HAVE_int_fetch_and_sub1_full
+#endif
+#if defined(AO_HAVE_int_fetch_and_add_release) \
+ && !defined(AO_HAVE_int_fetch_and_sub1_release)
+# define AO_int_fetch_and_sub1_release(addr) \
+ AO_int_fetch_and_add_release(addr, (unsigned)(-1))
+# define AO_HAVE_int_fetch_and_sub1_release
+#endif
+#if defined(AO_HAVE_int_fetch_and_add_acquire) \
+ && !defined(AO_HAVE_int_fetch_and_sub1_acquire)
+# define AO_int_fetch_and_sub1_acquire(addr) \
+ AO_int_fetch_and_add_acquire(addr, (unsigned)(-1))
+# define AO_HAVE_int_fetch_and_sub1_acquire
+#endif
+#if defined(AO_HAVE_int_fetch_and_add_write) \
+ && !defined(AO_HAVE_int_fetch_and_sub1_write)
+# define AO_int_fetch_and_sub1_write(addr) \
+ AO_int_fetch_and_add_write(addr, (unsigned)(-1))
+# define AO_HAVE_int_fetch_and_sub1_write
+#endif
+#if defined(AO_HAVE_int_fetch_and_add_read) \
+ && !defined(AO_HAVE_int_fetch_and_sub1_read)
+# define AO_int_fetch_and_sub1_read(addr) \
+ AO_int_fetch_and_add_read(addr, (unsigned)(-1))
+# define AO_HAVE_int_fetch_and_sub1_read
+#endif
+#if defined(AO_HAVE_int_fetch_and_add_release_write) \
+ && !defined(AO_HAVE_int_fetch_and_sub1_release_write)
+# define AO_int_fetch_and_sub1_release_write(addr) \
+ AO_int_fetch_and_add_release_write(addr, (unsigned)(-1))
+# define AO_HAVE_int_fetch_and_sub1_release_write
+#endif
+#if defined(AO_HAVE_int_fetch_and_add_acquire_read) \
+ && !defined(AO_HAVE_int_fetch_and_sub1_acquire_read)
+# define AO_int_fetch_and_sub1_acquire_read(addr) \
+ AO_int_fetch_and_add_acquire_read(addr, (unsigned)(-1))
+# define AO_HAVE_int_fetch_and_sub1_acquire_read
+#endif
+#if defined(AO_HAVE_int_fetch_and_add) \
+ && !defined(AO_HAVE_int_fetch_and_sub1)
+# define AO_int_fetch_and_sub1(addr) \
+ AO_int_fetch_and_add(addr, (unsigned)(-1))
+# define AO_HAVE_int_fetch_and_sub1
+#endif
+
+#if defined(AO_HAVE_int_fetch_and_sub1_full)
+# if !defined(AO_HAVE_int_fetch_and_sub1_release)
+# define AO_int_fetch_and_sub1_release(addr) \
+ AO_int_fetch_and_sub1_full(addr)
+# define AO_HAVE_int_fetch_and_sub1_release
+# endif
+# if !defined(AO_HAVE_int_fetch_and_sub1_acquire)
+# define AO_int_fetch_and_sub1_acquire(addr) \
+ AO_int_fetch_and_sub1_full(addr)
+# define AO_HAVE_int_fetch_and_sub1_acquire
+# endif
+# if !defined(AO_HAVE_int_fetch_and_sub1_write)
+# define AO_int_fetch_and_sub1_write(addr) \
+ AO_int_fetch_and_sub1_full(addr)
+# define AO_HAVE_int_fetch_and_sub1_write
+# endif
+# if !defined(AO_HAVE_int_fetch_and_sub1_read)
+# define AO_int_fetch_and_sub1_read(addr) \
+ AO_int_fetch_and_sub1_full(addr)
+# define AO_HAVE_int_fetch_and_sub1_read
+# endif
+#endif /* AO_HAVE_int_fetch_and_sub1_full */
+
+#if !defined(AO_HAVE_int_fetch_and_sub1) \
+ && defined(AO_HAVE_int_fetch_and_sub1_release)
+# define AO_int_fetch_and_sub1(addr) AO_int_fetch_and_sub1_release(addr)
+# define AO_HAVE_int_fetch_and_sub1
+#endif
+#if !defined(AO_HAVE_int_fetch_and_sub1) \
+ && defined(AO_HAVE_int_fetch_and_sub1_acquire)
+# define AO_int_fetch_and_sub1(addr) AO_int_fetch_and_sub1_acquire(addr)
+# define AO_HAVE_int_fetch_and_sub1
+#endif
+#if !defined(AO_HAVE_int_fetch_and_sub1) \
+ && defined(AO_HAVE_int_fetch_and_sub1_write)
+# define AO_int_fetch_and_sub1(addr) AO_int_fetch_and_sub1_write(addr)
+# define AO_HAVE_int_fetch_and_sub1
+#endif
+#if !defined(AO_HAVE_int_fetch_and_sub1) \
+ && defined(AO_HAVE_int_fetch_and_sub1_read)
+# define AO_int_fetch_and_sub1(addr) AO_int_fetch_and_sub1_read(addr)
+# define AO_HAVE_int_fetch_and_sub1
+#endif
+
+#if defined(AO_HAVE_int_fetch_and_sub1_acquire) \
+ && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_int_fetch_and_sub1_full)
+# define AO_int_fetch_and_sub1_full(addr) \
+ (AO_nop_full(), AO_int_fetch_and_sub1_acquire(addr))
+# define AO_HAVE_int_fetch_and_sub1_full
+#endif
+
+#if !defined(AO_HAVE_int_fetch_and_sub1_release_write) \
+ && defined(AO_HAVE_int_fetch_and_sub1_write)
+# define AO_int_fetch_and_sub1_release_write(addr) \
+ AO_int_fetch_and_sub1_write(addr)
+# define AO_HAVE_int_fetch_and_sub1_release_write
+#endif
+#if !defined(AO_HAVE_int_fetch_and_sub1_release_write) \
+ && defined(AO_HAVE_int_fetch_and_sub1_release)
+# define AO_int_fetch_and_sub1_release_write(addr) \
+ AO_int_fetch_and_sub1_release(addr)
+# define AO_HAVE_int_fetch_and_sub1_release_write
+#endif
+#if !defined(AO_HAVE_int_fetch_and_sub1_acquire_read) \
+ && defined(AO_HAVE_int_fetch_and_sub1_read)
+# define AO_int_fetch_and_sub1_acquire_read(addr) \
+ AO_int_fetch_and_sub1_read(addr)
+# define AO_HAVE_int_fetch_and_sub1_acquire_read
+#endif
+#if !defined(AO_HAVE_int_fetch_and_sub1_acquire_read) \
+ && defined(AO_HAVE_int_fetch_and_sub1_acquire)
+# define AO_int_fetch_and_sub1_acquire_read(addr) \
+ AO_int_fetch_and_sub1_acquire(addr)
+# define AO_HAVE_int_fetch_and_sub1_acquire_read
+#endif
+
+#ifdef AO_NO_DD_ORDERING
+# if defined(AO_HAVE_int_fetch_and_sub1_acquire_read)
+# define AO_int_fetch_and_sub1_dd_acquire_read(addr) \
+ AO_int_fetch_and_sub1_acquire_read(addr)
+# define AO_HAVE_int_fetch_and_sub1_dd_acquire_read
+# endif
+#else
+# if defined(AO_HAVE_int_fetch_and_sub1)
+# define AO_int_fetch_and_sub1_dd_acquire_read(addr) \
+ AO_int_fetch_and_sub1(addr)
+# define AO_HAVE_int_fetch_and_sub1_dd_acquire_read
+# endif
+#endif /* !AO_NO_DD_ORDERING */
+
+/* int_and */
+#if defined(AO_HAVE_int_compare_and_swap_full) \
+ && !defined(AO_HAVE_int_and_full)
+ /* Atomically AND "value" into *addr; the full-barrier compare-and-swap */
+ /* retry loop supplies the _full memory ordering. */
+ AO_INLINE void
+ AO_int_and_full(volatile unsigned *addr, unsigned value)
+ {
+ unsigned old;
+
+ do
+ {
+ old = *(unsigned *)addr;
+ }
+ while (AO_EXPECT_FALSE(!AO_int_compare_and_swap_full(addr, old,
+ old & value)));
+ }
+# define AO_HAVE_int_and_full
+#endif
+
+#if defined(AO_HAVE_int_and_full)
+# if !defined(AO_HAVE_int_and_release)
+# define AO_int_and_release(addr, val) AO_int_and_full(addr, val)
+# define AO_HAVE_int_and_release
+# endif
+# if !defined(AO_HAVE_int_and_acquire)
+# define AO_int_and_acquire(addr, val) AO_int_and_full(addr, val)
+# define AO_HAVE_int_and_acquire
+# endif
+# if !defined(AO_HAVE_int_and_write)
+# define AO_int_and_write(addr, val) AO_int_and_full(addr, val)
+# define AO_HAVE_int_and_write
+# endif
+# if !defined(AO_HAVE_int_and_read)
+# define AO_int_and_read(addr, val) AO_int_and_full(addr, val)
+# define AO_HAVE_int_and_read
+# endif
+#endif /* AO_HAVE_int_and_full */
+
+#if !defined(AO_HAVE_int_and) && defined(AO_HAVE_int_and_release)
+# define AO_int_and(addr, val) AO_int_and_release(addr, val)
+# define AO_HAVE_int_and
+#endif
+#if !defined(AO_HAVE_int_and) && defined(AO_HAVE_int_and_acquire)
+# define AO_int_and(addr, val) AO_int_and_acquire(addr, val)
+# define AO_HAVE_int_and
+#endif
+#if !defined(AO_HAVE_int_and) && defined(AO_HAVE_int_and_write)
+# define AO_int_and(addr, val) AO_int_and_write(addr, val)
+# define AO_HAVE_int_and
+#endif
+#if !defined(AO_HAVE_int_and) && defined(AO_HAVE_int_and_read)
+# define AO_int_and(addr, val) AO_int_and_read(addr, val)
+# define AO_HAVE_int_and
+#endif
+
+#if defined(AO_HAVE_int_and_acquire) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_int_and_full)
+# define AO_int_and_full(addr, val) \
+ (AO_nop_full(), AO_int_and_acquire(addr, val))
+# define AO_HAVE_int_and_full
+#endif
+
+#if !defined(AO_HAVE_int_and_release_write) \
+ && defined(AO_HAVE_int_and_write)
+# define AO_int_and_release_write(addr, val) AO_int_and_write(addr, val)
+# define AO_HAVE_int_and_release_write
+#endif
+#if !defined(AO_HAVE_int_and_release_write) \
+ && defined(AO_HAVE_int_and_release)
+# define AO_int_and_release_write(addr, val) AO_int_and_release(addr, val)
+# define AO_HAVE_int_and_release_write
+#endif
+#if !defined(AO_HAVE_int_and_acquire_read) \
+ && defined(AO_HAVE_int_and_read)
+# define AO_int_and_acquire_read(addr, val) AO_int_and_read(addr, val)
+# define AO_HAVE_int_and_acquire_read
+#endif
+#if !defined(AO_HAVE_int_and_acquire_read) \
+ && defined(AO_HAVE_int_and_acquire)
+# define AO_int_and_acquire_read(addr, val) AO_int_and_acquire(addr, val)
+# define AO_HAVE_int_and_acquire_read
+#endif
+
+/* int_or */
+#if defined(AO_HAVE_int_compare_and_swap_full) \
+ && !defined(AO_HAVE_int_or_full)
+ AO_INLINE void
+ AO_int_or_full(volatile unsigned *addr, unsigned value)
+ {
+ unsigned old;
+
+ do
+ {
+ old = *(unsigned *)addr;
+ }
+ while (AO_EXPECT_FALSE(!AO_int_compare_and_swap_full(addr, old,
+ old | value)));
+ }
+# define AO_HAVE_int_or_full
+#endif
+
+#if defined(AO_HAVE_int_or_full)
+# if !defined(AO_HAVE_int_or_release)
+# define AO_int_or_release(addr, val) AO_int_or_full(addr, val)
+# define AO_HAVE_int_or_release
+# endif
+# if !defined(AO_HAVE_int_or_acquire)
+# define AO_int_or_acquire(addr, val) AO_int_or_full(addr, val)
+# define AO_HAVE_int_or_acquire
+# endif
+# if !defined(AO_HAVE_int_or_write)
+# define AO_int_or_write(addr, val) AO_int_or_full(addr, val)
+# define AO_HAVE_int_or_write
+# endif
+# if !defined(AO_HAVE_int_or_read)
+# define AO_int_or_read(addr, val) AO_int_or_full(addr, val)
+# define AO_HAVE_int_or_read
+# endif
+#endif /* AO_HAVE_int_or_full */
+
+#if !defined(AO_HAVE_int_or) && defined(AO_HAVE_int_or_release)
+# define AO_int_or(addr, val) AO_int_or_release(addr, val)
+# define AO_HAVE_int_or
+#endif
+#if !defined(AO_HAVE_int_or) && defined(AO_HAVE_int_or_acquire)
+# define AO_int_or(addr, val) AO_int_or_acquire(addr, val)
+# define AO_HAVE_int_or
+#endif
+#if !defined(AO_HAVE_int_or) && defined(AO_HAVE_int_or_write)
+# define AO_int_or(addr, val) AO_int_or_write(addr, val)
+# define AO_HAVE_int_or
+#endif
+#if !defined(AO_HAVE_int_or) && defined(AO_HAVE_int_or_read)
+# define AO_int_or(addr, val) AO_int_or_read(addr, val)
+# define AO_HAVE_int_or
+#endif
+
+#if defined(AO_HAVE_int_or_acquire) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_int_or_full)
+# define AO_int_or_full(addr, val) \
+ (AO_nop_full(), AO_int_or_acquire(addr, val))
+# define AO_HAVE_int_or_full
+#endif
+
+#if !defined(AO_HAVE_int_or_release_write) \
+ && defined(AO_HAVE_int_or_write)
+# define AO_int_or_release_write(addr, val) AO_int_or_write(addr, val)
+# define AO_HAVE_int_or_release_write
+#endif
+#if !defined(AO_HAVE_int_or_release_write) \
+ && defined(AO_HAVE_int_or_release)
+# define AO_int_or_release_write(addr, val) AO_int_or_release(addr, val)
+# define AO_HAVE_int_or_release_write
+#endif
+#if !defined(AO_HAVE_int_or_acquire_read) && defined(AO_HAVE_int_or_read)
+# define AO_int_or_acquire_read(addr, val) AO_int_or_read(addr, val)
+# define AO_HAVE_int_or_acquire_read
+#endif
+#if !defined(AO_HAVE_int_or_acquire_read) \
+ && defined(AO_HAVE_int_or_acquire)
+# define AO_int_or_acquire_read(addr, val) AO_int_or_acquire(addr, val)
+# define AO_HAVE_int_or_acquire_read
+#endif
+
+/* int_xor */
+#if defined(AO_HAVE_int_compare_and_swap_full) \
+ && !defined(AO_HAVE_int_xor_full)
+ AO_INLINE void
+ AO_int_xor_full(volatile unsigned *addr, unsigned value)
+ {
+ unsigned old;
+
+ do
+ {
+ old = *(unsigned *)addr;
+ }
+ while (AO_EXPECT_FALSE(!AO_int_compare_and_swap_full(addr, old,
+ old ^ value)));
+ }
+# define AO_HAVE_int_xor_full
+#endif
+
+#if defined(AO_HAVE_int_xor_full)
+# if !defined(AO_HAVE_int_xor_release)
+# define AO_int_xor_release(addr, val) AO_int_xor_full(addr, val)
+# define AO_HAVE_int_xor_release
+# endif
+# if !defined(AO_HAVE_int_xor_acquire)
+# define AO_int_xor_acquire(addr, val) AO_int_xor_full(addr, val)
+# define AO_HAVE_int_xor_acquire
+# endif
+# if !defined(AO_HAVE_int_xor_write)
+# define AO_int_xor_write(addr, val) AO_int_xor_full(addr, val)
+# define AO_HAVE_int_xor_write
+# endif
+# if !defined(AO_HAVE_int_xor_read)
+# define AO_int_xor_read(addr, val) AO_int_xor_full(addr, val)
+# define AO_HAVE_int_xor_read
+# endif
+#endif /* AO_HAVE_int_xor_full */
+
+#if !defined(AO_HAVE_int_xor) && defined(AO_HAVE_int_xor_release)
+# define AO_int_xor(addr, val) AO_int_xor_release(addr, val)
+# define AO_HAVE_int_xor
+#endif
+#if !defined(AO_HAVE_int_xor) && defined(AO_HAVE_int_xor_acquire)
+# define AO_int_xor(addr, val) AO_int_xor_acquire(addr, val)
+# define AO_HAVE_int_xor
+#endif
+#if !defined(AO_HAVE_int_xor) && defined(AO_HAVE_int_xor_write)
+# define AO_int_xor(addr, val) AO_int_xor_write(addr, val)
+# define AO_HAVE_int_xor
+#endif
+#if !defined(AO_HAVE_int_xor) && defined(AO_HAVE_int_xor_read)
+# define AO_int_xor(addr, val) AO_int_xor_read(addr, val)
+# define AO_HAVE_int_xor
+#endif
+
+#if defined(AO_HAVE_int_xor_acquire) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_int_xor_full)
+# define AO_int_xor_full(addr, val) \
+ (AO_nop_full(), AO_int_xor_acquire(addr, val))
+# define AO_HAVE_int_xor_full
+#endif
+
+#if !defined(AO_HAVE_int_xor_release_write) \
+ && defined(AO_HAVE_int_xor_write)
+# define AO_int_xor_release_write(addr, val) AO_int_xor_write(addr, val)
+# define AO_HAVE_int_xor_release_write
+#endif
+#if !defined(AO_HAVE_int_xor_release_write) \
+ && defined(AO_HAVE_int_xor_release)
+# define AO_int_xor_release_write(addr, val) AO_int_xor_release(addr, val)
+# define AO_HAVE_int_xor_release_write
+#endif
+#if !defined(AO_HAVE_int_xor_acquire_read) \
+ && defined(AO_HAVE_int_xor_read)
+# define AO_int_xor_acquire_read(addr, val) AO_int_xor_read(addr, val)
+# define AO_HAVE_int_xor_acquire_read
+#endif
+#if !defined(AO_HAVE_int_xor_acquire_read) \
+ && defined(AO_HAVE_int_xor_acquire)
+# define AO_int_xor_acquire_read(addr, val) AO_int_xor_acquire(addr, val)
+# define AO_HAVE_int_xor_acquire_read
+#endif
+
+/* int_and/or/xor_dd_acquire_read are meaningless. */
+/*
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* compare_and_swap (based on fetch_compare_and_swap) */
+#if defined(AO_HAVE_fetch_compare_and_swap_full) \
+ && !defined(AO_HAVE_compare_and_swap_full)
+ AO_INLINE int
+ AO_compare_and_swap_full(volatile AO_t *addr, AO_t old_val,
+ AO_t new_val)
+ {
+ return AO_fetch_compare_and_swap_full(addr, old_val, new_val)
+ == old_val;
+ }
+# define AO_HAVE_compare_and_swap_full
+#endif
+
+#if defined(AO_HAVE_fetch_compare_and_swap_acquire) \
+ && !defined(AO_HAVE_compare_and_swap_acquire)
+ AO_INLINE int
+ AO_compare_and_swap_acquire(volatile AO_t *addr, AO_t old_val,
+ AO_t new_val)
+ {
+ return AO_fetch_compare_and_swap_acquire(addr, old_val, new_val)
+ == old_val;
+ }
+# define AO_HAVE_compare_and_swap_acquire
+#endif
+
+#if defined(AO_HAVE_fetch_compare_and_swap_release) \
+ && !defined(AO_HAVE_compare_and_swap_release)
+ AO_INLINE int
+ AO_compare_and_swap_release(volatile AO_t *addr, AO_t old_val,
+ AO_t new_val)
+ {
+ return AO_fetch_compare_and_swap_release(addr, old_val, new_val)
+ == old_val;
+ }
+# define AO_HAVE_compare_and_swap_release
+#endif
+
+#if defined(AO_HAVE_fetch_compare_and_swap_write) \
+ && !defined(AO_HAVE_compare_and_swap_write)
+ AO_INLINE int
+ AO_compare_and_swap_write(volatile AO_t *addr, AO_t old_val,
+ AO_t new_val)
+ {
+ return AO_fetch_compare_and_swap_write(addr, old_val, new_val)
+ == old_val;
+ }
+# define AO_HAVE_compare_and_swap_write
+#endif
+
+#if defined(AO_HAVE_fetch_compare_and_swap_read) \
+ && !defined(AO_HAVE_compare_and_swap_read)
+ AO_INLINE int
+ AO_compare_and_swap_read(volatile AO_t *addr, AO_t old_val,
+ AO_t new_val)
+ {
+ return AO_fetch_compare_and_swap_read(addr, old_val, new_val)
+ == old_val;
+ }
+# define AO_HAVE_compare_and_swap_read
+#endif
+
+#if defined(AO_HAVE_fetch_compare_and_swap) \
+ && !defined(AO_HAVE_compare_and_swap)
+ AO_INLINE int
+ AO_compare_and_swap(volatile AO_t *addr, AO_t old_val,
+ AO_t new_val)
+ {
+ return AO_fetch_compare_and_swap(addr, old_val, new_val) == old_val;
+ }
+# define AO_HAVE_compare_and_swap
+#endif
+
+#if defined(AO_HAVE_fetch_compare_and_swap_release_write) \
+ && !defined(AO_HAVE_compare_and_swap_release_write)
+ AO_INLINE int
+ AO_compare_and_swap_release_write(volatile AO_t *addr,
+ AO_t old_val, AO_t new_val)
+ {
+ return AO_fetch_compare_and_swap_release_write(addr, old_val,
+ new_val) == old_val;
+ }
+# define AO_HAVE_compare_and_swap_release_write
+#endif
+
+#if defined(AO_HAVE_fetch_compare_and_swap_acquire_read) \
+ && !defined(AO_HAVE_compare_and_swap_acquire_read)
+ AO_INLINE int
+ AO_compare_and_swap_acquire_read(volatile AO_t *addr,
+ AO_t old_val, AO_t new_val)
+ {
+ return AO_fetch_compare_and_swap_acquire_read(addr, old_val,
+ new_val) == old_val;
+ }
+# define AO_HAVE_compare_and_swap_acquire_read
+#endif
+
+#if defined(AO_HAVE_fetch_compare_and_swap_dd_acquire_read) \
+ && !defined(AO_HAVE_compare_and_swap_dd_acquire_read)
+ AO_INLINE int
+ AO_compare_and_swap_dd_acquire_read(volatile AO_t *addr,
+ AO_t old_val, AO_t new_val)
+ {
+ return AO_fetch_compare_and_swap_dd_acquire_read(addr, old_val,
+ new_val) == old_val;
+ }
+# define AO_HAVE_compare_and_swap_dd_acquire_read
+#endif
+
+/* fetch_and_add */
+/* We first try to implement fetch_and_add variants in terms of the */
+/* corresponding compare_and_swap variants to minimize adding barriers. */
+#if defined(AO_HAVE_compare_and_swap_full) \
+ && !defined(AO_HAVE_fetch_and_add_full)
+ AO_INLINE AO_t
+ AO_fetch_and_add_full(volatile AO_t *addr, AO_t incr)
+ {
+ AO_t old;
+
+ do
+ {
+ old = *(AO_t *)addr;
+ }
+ while (AO_EXPECT_FALSE(!AO_compare_and_swap_full(addr, old,
+ old + incr)));
+ return old;
+ }
+# define AO_HAVE_fetch_and_add_full
+#endif
+
+#if defined(AO_HAVE_compare_and_swap_acquire) \
+ && !defined(AO_HAVE_fetch_and_add_acquire)
+ AO_INLINE AO_t
+ AO_fetch_and_add_acquire(volatile AO_t *addr, AO_t incr)
+ {
+ AO_t old;
+
+ do
+ {
+ old = *(AO_t *)addr;
+ }
+ while (AO_EXPECT_FALSE(!AO_compare_and_swap_acquire(addr, old,
+ old + incr)));
+ return old;
+ }
+# define AO_HAVE_fetch_and_add_acquire
+#endif
+
+#if defined(AO_HAVE_compare_and_swap_release) \
+ && !defined(AO_HAVE_fetch_and_add_release)
+ AO_INLINE AO_t
+ AO_fetch_and_add_release(volatile AO_t *addr, AO_t incr)
+ {
+ AO_t old;
+
+ do
+ {
+ old = *(AO_t *)addr;
+ }
+ while (AO_EXPECT_FALSE(!AO_compare_and_swap_release(addr, old,
+ old + incr)));
+ return old;
+ }
+# define AO_HAVE_fetch_and_add_release
+#endif
+
+#if defined(AO_HAVE_compare_and_swap) \
+ && !defined(AO_HAVE_fetch_and_add)
+ AO_INLINE AO_t
+ AO_fetch_and_add(volatile AO_t *addr, AO_t incr)
+ {
+ AO_t old;
+
+ do
+ {
+ old = *(AO_t *)addr;
+ }
+ while (AO_EXPECT_FALSE(!AO_compare_and_swap(addr, old,
+ old + incr)));
+ return old;
+ }
+# define AO_HAVE_fetch_and_add
+#endif
+
+#if defined(AO_HAVE_fetch_and_add_full)
+# if !defined(AO_HAVE_fetch_and_add_release)
+# define AO_fetch_and_add_release(addr, val) \
+ AO_fetch_and_add_full(addr, val)
+# define AO_HAVE_fetch_and_add_release
+# endif
+# if !defined(AO_HAVE_fetch_and_add_acquire)
+# define AO_fetch_and_add_acquire(addr, val) \
+ AO_fetch_and_add_full(addr, val)
+# define AO_HAVE_fetch_and_add_acquire
+# endif
+# if !defined(AO_HAVE_fetch_and_add_write)
+# define AO_fetch_and_add_write(addr, val) \
+ AO_fetch_and_add_full(addr, val)
+# define AO_HAVE_fetch_and_add_write
+# endif
+# if !defined(AO_HAVE_fetch_and_add_read)
+# define AO_fetch_and_add_read(addr, val) \
+ AO_fetch_and_add_full(addr, val)
+# define AO_HAVE_fetch_and_add_read
+# endif
+#endif /* AO_HAVE_fetch_and_add_full */
+
+#if defined(AO_HAVE_fetch_and_add) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_fetch_and_add_acquire)
+ AO_INLINE AO_t
+ AO_fetch_and_add_acquire(volatile AO_t *addr, AO_t incr)
+ {
+ AO_t result = AO_fetch_and_add(addr, incr);
+ AO_nop_full();
+ return result;
+ }
+# define AO_HAVE_fetch_and_add_acquire
+#endif
+#if defined(AO_HAVE_fetch_and_add) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_fetch_and_add_release)
+# define AO_fetch_and_add_release(addr, incr) \
+ (AO_nop_full(), AO_fetch_and_add(addr, incr))
+# define AO_HAVE_fetch_and_add_release
+#endif
+
+#if !defined(AO_HAVE_fetch_and_add) \
+ && defined(AO_HAVE_fetch_and_add_release)
+# define AO_fetch_and_add(addr, val) \
+ AO_fetch_and_add_release(addr, val)
+# define AO_HAVE_fetch_and_add
+#endif
+#if !defined(AO_HAVE_fetch_and_add) \
+ && defined(AO_HAVE_fetch_and_add_acquire)
+# define AO_fetch_and_add(addr, val) \
+ AO_fetch_and_add_acquire(addr, val)
+# define AO_HAVE_fetch_and_add
+#endif
+#if !defined(AO_HAVE_fetch_and_add) \
+ && defined(AO_HAVE_fetch_and_add_write)
+# define AO_fetch_and_add(addr, val) \
+ AO_fetch_and_add_write(addr, val)
+# define AO_HAVE_fetch_and_add
+#endif
+#if !defined(AO_HAVE_fetch_and_add) \
+ && defined(AO_HAVE_fetch_and_add_read)
+# define AO_fetch_and_add(addr, val) \
+ AO_fetch_and_add_read(addr, val)
+# define AO_HAVE_fetch_and_add
+#endif
+
+#if defined(AO_HAVE_fetch_and_add_acquire) \
+ && defined(AO_HAVE_nop_full) && !defined(AO_HAVE_fetch_and_add_full)
+# define AO_fetch_and_add_full(addr, val) \
+ (AO_nop_full(), AO_fetch_and_add_acquire(addr, val))
+# define AO_HAVE_fetch_and_add_full
+#endif
+
+#if !defined(AO_HAVE_fetch_and_add_release_write) \
+ && defined(AO_HAVE_fetch_and_add_write)
+# define AO_fetch_and_add_release_write(addr, val) \
+ AO_fetch_and_add_write(addr, val)
+# define AO_HAVE_fetch_and_add_release_write
+#endif
+#if !defined(AO_HAVE_fetch_and_add_release_write) \
+ && defined(AO_HAVE_fetch_and_add_release)
+# define AO_fetch_and_add_release_write(addr, val) \
+ AO_fetch_and_add_release(addr, val)
+# define AO_HAVE_fetch_and_add_release_write
+#endif
+
+#if !defined(AO_HAVE_fetch_and_add_acquire_read) \
+ && defined(AO_HAVE_fetch_and_add_read)
+# define AO_fetch_and_add_acquire_read(addr, val) \
+ AO_fetch_and_add_read(addr, val)
+# define AO_HAVE_fetch_and_add_acquire_read
+#endif
+#if !defined(AO_HAVE_fetch_and_add_acquire_read) \
+ && defined(AO_HAVE_fetch_and_add_acquire)
+# define AO_fetch_and_add_acquire_read(addr, val) \
+ AO_fetch_and_add_acquire(addr, val)
+# define AO_HAVE_fetch_and_add_acquire_read
+#endif
+
+#ifdef AO_NO_DD_ORDERING
+# if defined(AO_HAVE_fetch_and_add_acquire_read)
+# define AO_fetch_and_add_dd_acquire_read(addr, val) \
+ AO_fetch_and_add_acquire_read(addr, val)
+# define AO_HAVE_fetch_and_add_dd_acquire_read
+# endif
+#else
+# if defined(AO_HAVE_fetch_and_add)
+# define AO_fetch_and_add_dd_acquire_read(addr, val) \
+ AO_fetch_and_add(addr, val)
+# define AO_HAVE_fetch_and_add_dd_acquire_read
+# endif
+#endif /* !AO_NO_DD_ORDERING */
+
+/* fetch_and_add1 */
+#if defined(AO_HAVE_fetch_and_add_full) \
+ && !defined(AO_HAVE_fetch_and_add1_full)
+# define AO_fetch_and_add1_full(addr) \
+ AO_fetch_and_add_full(addr, 1)
+# define AO_HAVE_fetch_and_add1_full
+#endif
+#if defined(AO_HAVE_fetch_and_add_release) \
+ && !defined(AO_HAVE_fetch_and_add1_release)
+# define AO_fetch_and_add1_release(addr) \
+ AO_fetch_and_add_release(addr, 1)
+# define AO_HAVE_fetch_and_add1_release
+#endif
+#if defined(AO_HAVE_fetch_and_add_acquire) \
+ && !defined(AO_HAVE_fetch_and_add1_acquire)
+# define AO_fetch_and_add1_acquire(addr) \
+ AO_fetch_and_add_acquire(addr, 1)
+# define AO_HAVE_fetch_and_add1_acquire
+#endif
+#if defined(AO_HAVE_fetch_and_add_write) \
+ && !defined(AO_HAVE_fetch_and_add1_write)
+# define AO_fetch_and_add1_write(addr) \
+ AO_fetch_and_add_write(addr, 1)
+# define AO_HAVE_fetch_and_add1_write
+#endif
+#if defined(AO_HAVE_fetch_and_add_read) \
+ && !defined(AO_HAVE_fetch_and_add1_read)
+# define AO_fetch_and_add1_read(addr) \
+ AO_fetch_and_add_read(addr, 1)
+# define AO_HAVE_fetch_and_add1_read
+#endif
+#if defined(AO_HAVE_fetch_and_add_release_write) \
+ && !defined(AO_HAVE_fetch_and_add1_release_write)
+# define AO_fetch_and_add1_release_write(addr) \
+ AO_fetch_and_add_release_write(addr, 1)
+# define AO_HAVE_fetch_and_add1_release_write
+#endif
+#if defined(AO_HAVE_fetch_and_add_acquire_read) \
+ && !defined(AO_HAVE_fetch_and_add1_acquire_read)
+# define AO_fetch_and_add1_acquire_read(addr) \
+ AO_fetch_and_add_acquire_read(addr, 1)
+# define AO_HAVE_fetch_and_add1_acquire_read
+#endif
+#if defined(AO_HAVE_fetch_and_add) \
+ && !defined(AO_HAVE_fetch_and_add1)
+# define AO_fetch_and_add1(addr) AO_fetch_and_add(addr, 1)
+# define AO_HAVE_fetch_and_add1
+#endif
+
+#if defined(AO_HAVE_fetch_and_add1_full)
+# if !defined(AO_HAVE_fetch_and_add1_release)
+# define AO_fetch_and_add1_release(addr) \
+ AO_fetch_and_add1_full(addr)
+# define AO_HAVE_fetch_and_add1_release
+# endif
+# if !defined(AO_HAVE_fetch_and_add1_acquire)
+# define AO_fetch_and_add1_acquire(addr) \
+ AO_fetch_and_add1_full(addr)
+# define AO_HAVE_fetch_and_add1_acquire
+# endif
+# if !defined(AO_HAVE_fetch_and_add1_write)
+# define AO_fetch_and_add1_write(addr) \
+ AO_fetch_and_add1_full(addr)
+# define AO_HAVE_fetch_and_add1_write
+# endif
+# if !defined(AO_HAVE_fetch_and_add1_read)
+# define AO_fetch_and_add1_read(addr) \
+ AO_fetch_and_add1_full(addr)
+# define AO_HAVE_fetch_and_add1_read
+# endif
+#endif /* AO_HAVE_fetch_and_add1_full */
+
+#if !defined(AO_HAVE_fetch_and_add1) \
+ && defined(AO_HAVE_fetch_and_add1_release)
+# define AO_fetch_and_add1(addr) AO_fetch_and_add1_release(addr)
+# define AO_HAVE_fetch_and_add1
+#endif
+#if !defined(AO_HAVE_fetch_and_add1) \
+ && defined(AO_HAVE_fetch_and_add1_acquire)
+# define AO_fetch_and_add1(addr) AO_fetch_and_add1_acquire(addr)
+# define AO_HAVE_fetch_and_add1
+#endif
+#if !defined(AO_HAVE_fetch_and_add1) \
+ && defined(AO_HAVE_fetch_and_add1_write)
+# define AO_fetch_and_add1(addr) AO_fetch_and_add1_write(addr)
+# define AO_HAVE_fetch_and_add1
+#endif
+#if !defined(AO_HAVE_fetch_and_add1) \
+ && defined(AO_HAVE_fetch_and_add1_read)
+# define AO_fetch_and_add1(addr) AO_fetch_and_add1_read(addr)
+# define AO_HAVE_fetch_and_add1
+#endif
+
+#if defined(AO_HAVE_fetch_and_add1_acquire) \
+ && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_fetch_and_add1_full)
+# define AO_fetch_and_add1_full(addr) \
+ (AO_nop_full(), AO_fetch_and_add1_acquire(addr))
+# define AO_HAVE_fetch_and_add1_full
+#endif
+
+#if !defined(AO_HAVE_fetch_and_add1_release_write) \
+ && defined(AO_HAVE_fetch_and_add1_write)
+# define AO_fetch_and_add1_release_write(addr) \
+ AO_fetch_and_add1_write(addr)
+# define AO_HAVE_fetch_and_add1_release_write
+#endif
+#if !defined(AO_HAVE_fetch_and_add1_release_write) \
+ && defined(AO_HAVE_fetch_and_add1_release)
+# define AO_fetch_and_add1_release_write(addr) \
+ AO_fetch_and_add1_release(addr)
+# define AO_HAVE_fetch_and_add1_release_write
+#endif
+#if !defined(AO_HAVE_fetch_and_add1_acquire_read) \
+ && defined(AO_HAVE_fetch_and_add1_read)
+# define AO_fetch_and_add1_acquire_read(addr) \
+ AO_fetch_and_add1_read(addr)
+# define AO_HAVE_fetch_and_add1_acquire_read
+#endif
+#if !defined(AO_HAVE_fetch_and_add1_acquire_read) \
+ && defined(AO_HAVE_fetch_and_add1_acquire)
+# define AO_fetch_and_add1_acquire_read(addr) \
+ AO_fetch_and_add1_acquire(addr)
+# define AO_HAVE_fetch_and_add1_acquire_read
+#endif
+
+#ifdef AO_NO_DD_ORDERING
+# if defined(AO_HAVE_fetch_and_add1_acquire_read)
+# define AO_fetch_and_add1_dd_acquire_read(addr) \
+ AO_fetch_and_add1_acquire_read(addr)
+# define AO_HAVE_fetch_and_add1_dd_acquire_read
+# endif
+#else
+# if defined(AO_HAVE_fetch_and_add1)
+# define AO_fetch_and_add1_dd_acquire_read(addr) \
+ AO_fetch_and_add1(addr)
+# define AO_HAVE_fetch_and_add1_dd_acquire_read
+# endif
+#endif /* !AO_NO_DD_ORDERING */
+
+/* fetch_and_sub1 */
+#if defined(AO_HAVE_fetch_and_add_full) \
+ && !defined(AO_HAVE_fetch_and_sub1_full)
+# define AO_fetch_and_sub1_full(addr) \
+ AO_fetch_and_add_full(addr, (AO_t)(-1))
+# define AO_HAVE_fetch_and_sub1_full
+#endif
+#if defined(AO_HAVE_fetch_and_add_release) \
+ && !defined(AO_HAVE_fetch_and_sub1_release)
+# define AO_fetch_and_sub1_release(addr) \
+ AO_fetch_and_add_release(addr, (AO_t)(-1))
+# define AO_HAVE_fetch_and_sub1_release
+#endif
+#if defined(AO_HAVE_fetch_and_add_acquire) \
+ && !defined(AO_HAVE_fetch_and_sub1_acquire)
+# define AO_fetch_and_sub1_acquire(addr) \
+ AO_fetch_and_add_acquire(addr, (AO_t)(-1))
+# define AO_HAVE_fetch_and_sub1_acquire
+#endif
+#if defined(AO_HAVE_fetch_and_add_write) \
+ && !defined(AO_HAVE_fetch_and_sub1_write)
+# define AO_fetch_and_sub1_write(addr) \
+ AO_fetch_and_add_write(addr, (AO_t)(-1))
+# define AO_HAVE_fetch_and_sub1_write
+#endif
+#if defined(AO_HAVE_fetch_and_add_read) \
+ && !defined(AO_HAVE_fetch_and_sub1_read)
+# define AO_fetch_and_sub1_read(addr) \
+ AO_fetch_and_add_read(addr, (AO_t)(-1))
+# define AO_HAVE_fetch_and_sub1_read
+#endif
+#if defined(AO_HAVE_fetch_and_add_release_write) \
+ && !defined(AO_HAVE_fetch_and_sub1_release_write)
+# define AO_fetch_and_sub1_release_write(addr) \
+ AO_fetch_and_add_release_write(addr, (AO_t)(-1))
+# define AO_HAVE_fetch_and_sub1_release_write
+#endif
+#if defined(AO_HAVE_fetch_and_add_acquire_read) \
+ && !defined(AO_HAVE_fetch_and_sub1_acquire_read)
+# define AO_fetch_and_sub1_acquire_read(addr) \
+ AO_fetch_and_add_acquire_read(addr, (AO_t)(-1))
+# define AO_HAVE_fetch_and_sub1_acquire_read
+#endif
+#if defined(AO_HAVE_fetch_and_add) \
+ && !defined(AO_HAVE_fetch_and_sub1)
+# define AO_fetch_and_sub1(addr) \
+ AO_fetch_and_add(addr, (AO_t)(-1))
+# define AO_HAVE_fetch_and_sub1
+#endif
+
+#if defined(AO_HAVE_fetch_and_sub1_full)
+# if !defined(AO_HAVE_fetch_and_sub1_release)
+# define AO_fetch_and_sub1_release(addr) \
+ AO_fetch_and_sub1_full(addr)
+# define AO_HAVE_fetch_and_sub1_release
+# endif
+# if !defined(AO_HAVE_fetch_and_sub1_acquire)
+# define AO_fetch_and_sub1_acquire(addr) \
+ AO_fetch_and_sub1_full(addr)
+# define AO_HAVE_fetch_and_sub1_acquire
+# endif
+# if !defined(AO_HAVE_fetch_and_sub1_write)
+# define AO_fetch_and_sub1_write(addr) \
+ AO_fetch_and_sub1_full(addr)
+# define AO_HAVE_fetch_and_sub1_write
+# endif
+# if !defined(AO_HAVE_fetch_and_sub1_read)
+# define AO_fetch_and_sub1_read(addr) \
+ AO_fetch_and_sub1_full(addr)
+# define AO_HAVE_fetch_and_sub1_read
+# endif
+#endif /* AO_HAVE_fetch_and_sub1_full */
+
+#if !defined(AO_HAVE_fetch_and_sub1) \
+ && defined(AO_HAVE_fetch_and_sub1_release)
+# define AO_fetch_and_sub1(addr) AO_fetch_and_sub1_release(addr)
+# define AO_HAVE_fetch_and_sub1
+#endif
+#if !defined(AO_HAVE_fetch_and_sub1) \
+ && defined(AO_HAVE_fetch_and_sub1_acquire)
+# define AO_fetch_and_sub1(addr) AO_fetch_and_sub1_acquire(addr)
+# define AO_HAVE_fetch_and_sub1
+#endif
+#if !defined(AO_HAVE_fetch_and_sub1) \
+ && defined(AO_HAVE_fetch_and_sub1_write)
+# define AO_fetch_and_sub1(addr) AO_fetch_and_sub1_write(addr)
+# define AO_HAVE_fetch_and_sub1
+#endif
+#if !defined(AO_HAVE_fetch_and_sub1) \
+ && defined(AO_HAVE_fetch_and_sub1_read)
+# define AO_fetch_and_sub1(addr) AO_fetch_and_sub1_read(addr)
+# define AO_HAVE_fetch_and_sub1
+#endif
+
+#if defined(AO_HAVE_fetch_and_sub1_acquire) \
+ && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_fetch_and_sub1_full)
+# define AO_fetch_and_sub1_full(addr) \
+ (AO_nop_full(), AO_fetch_and_sub1_acquire(addr))
+# define AO_HAVE_fetch_and_sub1_full
+#endif
+
+#if !defined(AO_HAVE_fetch_and_sub1_release_write) \
+ && defined(AO_HAVE_fetch_and_sub1_write)
+# define AO_fetch_and_sub1_release_write(addr) \
+ AO_fetch_and_sub1_write(addr)
+# define AO_HAVE_fetch_and_sub1_release_write
+#endif
+#if !defined(AO_HAVE_fetch_and_sub1_release_write) \
+ && defined(AO_HAVE_fetch_and_sub1_release)
+# define AO_fetch_and_sub1_release_write(addr) \
+ AO_fetch_and_sub1_release(addr)
+# define AO_HAVE_fetch_and_sub1_release_write
+#endif
+#if !defined(AO_HAVE_fetch_and_sub1_acquire_read) \
+ && defined(AO_HAVE_fetch_and_sub1_read)
+# define AO_fetch_and_sub1_acquire_read(addr) \
+ AO_fetch_and_sub1_read(addr)
+# define AO_HAVE_fetch_and_sub1_acquire_read
+#endif
+#if !defined(AO_HAVE_fetch_and_sub1_acquire_read) \
+ && defined(AO_HAVE_fetch_and_sub1_acquire)
+# define AO_fetch_and_sub1_acquire_read(addr) \
+ AO_fetch_and_sub1_acquire(addr)
+# define AO_HAVE_fetch_and_sub1_acquire_read
+#endif
+
+#ifdef AO_NO_DD_ORDERING
+# if defined(AO_HAVE_fetch_and_sub1_acquire_read)
+# define AO_fetch_and_sub1_dd_acquire_read(addr) \
+ AO_fetch_and_sub1_acquire_read(addr)
+# define AO_HAVE_fetch_and_sub1_dd_acquire_read
+# endif
+#else
+# if defined(AO_HAVE_fetch_and_sub1)
+# define AO_fetch_and_sub1_dd_acquire_read(addr) \
+ AO_fetch_and_sub1(addr)
+# define AO_HAVE_fetch_and_sub1_dd_acquire_read
+# endif
+#endif /* !AO_NO_DD_ORDERING */
+
+/* and */
+#if defined(AO_HAVE_compare_and_swap_full) \
+ && !defined(AO_HAVE_and_full)
+ AO_INLINE void
+ AO_and_full(volatile AO_t *addr, AO_t value)
+ {
+ AO_t old;
+
+ do
+ {
+ old = *(AO_t *)addr;
+ }
+ while (AO_EXPECT_FALSE(!AO_compare_and_swap_full(addr, old,
+ old & value)));
+ }
+# define AO_HAVE_and_full
+#endif
+
+#if defined(AO_HAVE_and_full)
+# if !defined(AO_HAVE_and_release)
+# define AO_and_release(addr, val) AO_and_full(addr, val)
+# define AO_HAVE_and_release
+# endif
+# if !defined(AO_HAVE_and_acquire)
+# define AO_and_acquire(addr, val) AO_and_full(addr, val)
+# define AO_HAVE_and_acquire
+# endif
+# if !defined(AO_HAVE_and_write)
+# define AO_and_write(addr, val) AO_and_full(addr, val)
+# define AO_HAVE_and_write
+# endif
+# if !defined(AO_HAVE_and_read)
+# define AO_and_read(addr, val) AO_and_full(addr, val)
+# define AO_HAVE_and_read
+# endif
+#endif /* AO_HAVE_and_full */
+
+#if !defined(AO_HAVE_and) && defined(AO_HAVE_and_release)
+# define AO_and(addr, val) AO_and_release(addr, val)
+# define AO_HAVE_and
+#endif
+#if !defined(AO_HAVE_and) && defined(AO_HAVE_and_acquire)
+# define AO_and(addr, val) AO_and_acquire(addr, val)
+# define AO_HAVE_and
+#endif
+#if !defined(AO_HAVE_and) && defined(AO_HAVE_and_write)
+# define AO_and(addr, val) AO_and_write(addr, val)
+# define AO_HAVE_and
+#endif
+#if !defined(AO_HAVE_and) && defined(AO_HAVE_and_read)
+# define AO_and(addr, val) AO_and_read(addr, val)
+# define AO_HAVE_and
+#endif
+
+#if defined(AO_HAVE_and_acquire) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_and_full)
+# define AO_and_full(addr, val) \
+ (AO_nop_full(), AO_and_acquire(addr, val))
+# define AO_HAVE_and_full
+#endif
+
+#if !defined(AO_HAVE_and_release_write) \
+ && defined(AO_HAVE_and_write)
+# define AO_and_release_write(addr, val) AO_and_write(addr, val)
+# define AO_HAVE_and_release_write
+#endif
+#if !defined(AO_HAVE_and_release_write) \
+ && defined(AO_HAVE_and_release)
+# define AO_and_release_write(addr, val) AO_and_release(addr, val)
+# define AO_HAVE_and_release_write
+#endif
+#if !defined(AO_HAVE_and_acquire_read) \
+ && defined(AO_HAVE_and_read)
+# define AO_and_acquire_read(addr, val) AO_and_read(addr, val)
+# define AO_HAVE_and_acquire_read
+#endif
+#if !defined(AO_HAVE_and_acquire_read) \
+ && defined(AO_HAVE_and_acquire)
+# define AO_and_acquire_read(addr, val) AO_and_acquire(addr, val)
+# define AO_HAVE_and_acquire_read
+#endif
+
+/* or */
+#if defined(AO_HAVE_compare_and_swap_full) \
+ && !defined(AO_HAVE_or_full)
+ AO_INLINE void
+ AO_or_full(volatile AO_t *addr, AO_t value)
+ {
+ AO_t old;
+
+ do
+ {
+ old = *(AO_t *)addr;
+ }
+ while (AO_EXPECT_FALSE(!AO_compare_and_swap_full(addr, old,
+ old | value)));
+ }
+# define AO_HAVE_or_full
+#endif
+
+#if defined(AO_HAVE_or_full)
+# if !defined(AO_HAVE_or_release)
+# define AO_or_release(addr, val) AO_or_full(addr, val)
+# define AO_HAVE_or_release
+# endif
+# if !defined(AO_HAVE_or_acquire)
+# define AO_or_acquire(addr, val) AO_or_full(addr, val)
+# define AO_HAVE_or_acquire
+# endif
+# if !defined(AO_HAVE_or_write)
+# define AO_or_write(addr, val) AO_or_full(addr, val)
+# define AO_HAVE_or_write
+# endif
+# if !defined(AO_HAVE_or_read)
+# define AO_or_read(addr, val) AO_or_full(addr, val)
+# define AO_HAVE_or_read
+# endif
+#endif /* AO_HAVE_or_full */
+
+#if !defined(AO_HAVE_or) && defined(AO_HAVE_or_release)
+# define AO_or(addr, val) AO_or_release(addr, val)
+# define AO_HAVE_or
+#endif
+#if !defined(AO_HAVE_or) && defined(AO_HAVE_or_acquire)
+# define AO_or(addr, val) AO_or_acquire(addr, val)
+# define AO_HAVE_or
+#endif
+#if !defined(AO_HAVE_or) && defined(AO_HAVE_or_write)
+# define AO_or(addr, val) AO_or_write(addr, val)
+# define AO_HAVE_or
+#endif
+#if !defined(AO_HAVE_or) && defined(AO_HAVE_or_read)
+# define AO_or(addr, val) AO_or_read(addr, val)
+# define AO_HAVE_or
+#endif
+
+#if defined(AO_HAVE_or_acquire) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_or_full)
+# define AO_or_full(addr, val) \
+ (AO_nop_full(), AO_or_acquire(addr, val))
+# define AO_HAVE_or_full
+#endif
+
+#if !defined(AO_HAVE_or_release_write) \
+ && defined(AO_HAVE_or_write)
+# define AO_or_release_write(addr, val) AO_or_write(addr, val)
+# define AO_HAVE_or_release_write
+#endif
+#if !defined(AO_HAVE_or_release_write) \
+ && defined(AO_HAVE_or_release)
+# define AO_or_release_write(addr, val) AO_or_release(addr, val)
+# define AO_HAVE_or_release_write
+#endif
+#if !defined(AO_HAVE_or_acquire_read) && defined(AO_HAVE_or_read)
+# define AO_or_acquire_read(addr, val) AO_or_read(addr, val)
+# define AO_HAVE_or_acquire_read
+#endif
+#if !defined(AO_HAVE_or_acquire_read) \
+ && defined(AO_HAVE_or_acquire)
+# define AO_or_acquire_read(addr, val) AO_or_acquire(addr, val)
+# define AO_HAVE_or_acquire_read
+#endif
+
+/* xor */
+#if defined(AO_HAVE_compare_and_swap_full) \
+ && !defined(AO_HAVE_xor_full)
+ AO_INLINE void
+ AO_xor_full(volatile AO_t *addr, AO_t value)
+ {
+ AO_t old;
+
+ do
+ {
+ old = *(AO_t *)addr;
+ }
+ while (AO_EXPECT_FALSE(!AO_compare_and_swap_full(addr, old,
+ old ^ value)));
+ }
+# define AO_HAVE_xor_full
+#endif
+
+#if defined(AO_HAVE_xor_full)
+# if !defined(AO_HAVE_xor_release)
+# define AO_xor_release(addr, val) AO_xor_full(addr, val)
+# define AO_HAVE_xor_release
+# endif
+# if !defined(AO_HAVE_xor_acquire)
+# define AO_xor_acquire(addr, val) AO_xor_full(addr, val)
+# define AO_HAVE_xor_acquire
+# endif
+# if !defined(AO_HAVE_xor_write)
+# define AO_xor_write(addr, val) AO_xor_full(addr, val)
+# define AO_HAVE_xor_write
+# endif
+# if !defined(AO_HAVE_xor_read)
+# define AO_xor_read(addr, val) AO_xor_full(addr, val)
+# define AO_HAVE_xor_read
+# endif
+#endif /* AO_HAVE_xor_full */
+
+#if !defined(AO_HAVE_xor) && defined(AO_HAVE_xor_release)
+# define AO_xor(addr, val) AO_xor_release(addr, val)
+# define AO_HAVE_xor
+#endif
+#if !defined(AO_HAVE_xor) && defined(AO_HAVE_xor_acquire)
+# define AO_xor(addr, val) AO_xor_acquire(addr, val)
+# define AO_HAVE_xor
+#endif
+#if !defined(AO_HAVE_xor) && defined(AO_HAVE_xor_write)
+# define AO_xor(addr, val) AO_xor_write(addr, val)
+# define AO_HAVE_xor
+#endif
+#if !defined(AO_HAVE_xor) && defined(AO_HAVE_xor_read)
+# define AO_xor(addr, val) AO_xor_read(addr, val)
+# define AO_HAVE_xor
+#endif
+
+#if defined(AO_HAVE_xor_acquire) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_xor_full)
+# define AO_xor_full(addr, val) \
+ (AO_nop_full(), AO_xor_acquire(addr, val))
+# define AO_HAVE_xor_full
+#endif
+
+#if !defined(AO_HAVE_xor_release_write) \
+ && defined(AO_HAVE_xor_write)
+# define AO_xor_release_write(addr, val) AO_xor_write(addr, val)
+# define AO_HAVE_xor_release_write
+#endif
+#if !defined(AO_HAVE_xor_release_write) \
+ && defined(AO_HAVE_xor_release)
+# define AO_xor_release_write(addr, val) AO_xor_release(addr, val)
+# define AO_HAVE_xor_release_write
+#endif
+#if !defined(AO_HAVE_xor_acquire_read) \
+ && defined(AO_HAVE_xor_read)
+# define AO_xor_acquire_read(addr, val) AO_xor_read(addr, val)
+# define AO_HAVE_xor_acquire_read
+#endif
+#if !defined(AO_HAVE_xor_acquire_read) \
+ && defined(AO_HAVE_xor_acquire)
+# define AO_xor_acquire_read(addr, val) AO_xor_acquire(addr, val)
+# define AO_HAVE_xor_acquire_read
+#endif
+
+/* and/or/xor_dd_acquire_read are meaningless. */
--- /dev/null
+/*
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* XSIZE_compare_and_swap (based on fetch_compare_and_swap) */
+#if defined(AO_HAVE_XSIZE_fetch_compare_and_swap_full) \
+ && !defined(AO_HAVE_XSIZE_compare_and_swap_full)
+ AO_INLINE int
+ AO_XSIZE_compare_and_swap_full(volatile XCTYPE *addr, XCTYPE old_val,
+ XCTYPE new_val)
+ {
+ return AO_XSIZE_fetch_compare_and_swap_full(addr, old_val, new_val)
+ == old_val;
+ }
+# define AO_HAVE_XSIZE_compare_and_swap_full
+#endif
+
+#if defined(AO_HAVE_XSIZE_fetch_compare_and_swap_acquire) \
+ && !defined(AO_HAVE_XSIZE_compare_and_swap_acquire)
+ AO_INLINE int
+ AO_XSIZE_compare_and_swap_acquire(volatile XCTYPE *addr, XCTYPE old_val,
+ XCTYPE new_val)
+ {
+ return AO_XSIZE_fetch_compare_and_swap_acquire(addr, old_val, new_val)
+ == old_val;
+ }
+# define AO_HAVE_XSIZE_compare_and_swap_acquire
+#endif
+
+#if defined(AO_HAVE_XSIZE_fetch_compare_and_swap_release) \
+ && !defined(AO_HAVE_XSIZE_compare_and_swap_release)
+ AO_INLINE int
+ AO_XSIZE_compare_and_swap_release(volatile XCTYPE *addr, XCTYPE old_val,
+ XCTYPE new_val)
+ {
+ return AO_XSIZE_fetch_compare_and_swap_release(addr, old_val, new_val)
+ == old_val;
+ }
+# define AO_HAVE_XSIZE_compare_and_swap_release
+#endif
+
+#if defined(AO_HAVE_XSIZE_fetch_compare_and_swap_write) \
+ && !defined(AO_HAVE_XSIZE_compare_and_swap_write)
+ AO_INLINE int
+ AO_XSIZE_compare_and_swap_write(volatile XCTYPE *addr, XCTYPE old_val,
+ XCTYPE new_val)
+ {
+ return AO_XSIZE_fetch_compare_and_swap_write(addr, old_val, new_val)
+ == old_val;
+ }
+# define AO_HAVE_XSIZE_compare_and_swap_write
+#endif
+
+#if defined(AO_HAVE_XSIZE_fetch_compare_and_swap_read) \
+ && !defined(AO_HAVE_XSIZE_compare_and_swap_read)
+ AO_INLINE int
+ AO_XSIZE_compare_and_swap_read(volatile XCTYPE *addr, XCTYPE old_val,
+ XCTYPE new_val)
+ {
+ return AO_XSIZE_fetch_compare_and_swap_read(addr, old_val, new_val)
+ == old_val;
+ }
+# define AO_HAVE_XSIZE_compare_and_swap_read
+#endif
+
+#if defined(AO_HAVE_XSIZE_fetch_compare_and_swap) \
+ && !defined(AO_HAVE_XSIZE_compare_and_swap)
+ AO_INLINE int
+ AO_XSIZE_compare_and_swap(volatile XCTYPE *addr, XCTYPE old_val,
+ XCTYPE new_val)
+ {
+ return AO_XSIZE_fetch_compare_and_swap(addr, old_val, new_val) == old_val;
+ }
+# define AO_HAVE_XSIZE_compare_and_swap
+#endif
+
+#if defined(AO_HAVE_XSIZE_fetch_compare_and_swap_release_write) \
+ && !defined(AO_HAVE_XSIZE_compare_and_swap_release_write)
+ AO_INLINE int
+ AO_XSIZE_compare_and_swap_release_write(volatile XCTYPE *addr,
+ XCTYPE old_val, XCTYPE new_val)
+ {
+ return AO_XSIZE_fetch_compare_and_swap_release_write(addr, old_val,
+ new_val) == old_val;
+ }
+# define AO_HAVE_XSIZE_compare_and_swap_release_write
+#endif
+
+#if defined(AO_HAVE_XSIZE_fetch_compare_and_swap_acquire_read) \
+ && !defined(AO_HAVE_XSIZE_compare_and_swap_acquire_read)
+ AO_INLINE int
+ AO_XSIZE_compare_and_swap_acquire_read(volatile XCTYPE *addr,
+ XCTYPE old_val, XCTYPE new_val)
+ {
+ return AO_XSIZE_fetch_compare_and_swap_acquire_read(addr, old_val,
+ new_val) == old_val;
+ }
+# define AO_HAVE_XSIZE_compare_and_swap_acquire_read
+#endif
+
+#if defined(AO_HAVE_XSIZE_fetch_compare_and_swap_dd_acquire_read) \
+ && !defined(AO_HAVE_XSIZE_compare_and_swap_dd_acquire_read)
+ AO_INLINE int
+ AO_XSIZE_compare_and_swap_dd_acquire_read(volatile XCTYPE *addr,
+ XCTYPE old_val, XCTYPE new_val)
+ {
+ return AO_XSIZE_fetch_compare_and_swap_dd_acquire_read(addr, old_val,
+ new_val) == old_val;
+ }
+# define AO_HAVE_XSIZE_compare_and_swap_dd_acquire_read
+#endif
+
+/* XSIZE_fetch_and_add */
+/* We first try to implement fetch_and_add variants in terms of the */
+/* corresponding compare_and_swap variants to minimize adding barriers. */
+#if defined(AO_HAVE_XSIZE_compare_and_swap_full) \
+ && !defined(AO_HAVE_XSIZE_fetch_and_add_full)
+ AO_INLINE XCTYPE
+ AO_XSIZE_fetch_and_add_full(volatile XCTYPE *addr, XCTYPE incr)
+ {
+ XCTYPE old;
+
+ do
+ {
+ old = *(XCTYPE *)addr;
+ }
+ while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap_full(addr, old,
+ old + incr)));
+ return old;
+ }
+# define AO_HAVE_XSIZE_fetch_and_add_full
+#endif
+
+#if defined(AO_HAVE_XSIZE_compare_and_swap_acquire) \
+ && !defined(AO_HAVE_XSIZE_fetch_and_add_acquire)
+ AO_INLINE XCTYPE
+ AO_XSIZE_fetch_and_add_acquire(volatile XCTYPE *addr, XCTYPE incr)
+ {
+ XCTYPE old;
+
+ do
+ {
+ old = *(XCTYPE *)addr;
+ }
+ while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap_acquire(addr, old,
+ old + incr)));
+ return old;
+ }
+# define AO_HAVE_XSIZE_fetch_and_add_acquire
+#endif
+
+#if defined(AO_HAVE_XSIZE_compare_and_swap_release) \
+ && !defined(AO_HAVE_XSIZE_fetch_and_add_release)
+ AO_INLINE XCTYPE
+ AO_XSIZE_fetch_and_add_release(volatile XCTYPE *addr, XCTYPE incr)
+ {
+ XCTYPE old;
+
+ do
+ {
+ old = *(XCTYPE *)addr;
+ }
+ while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap_release(addr, old,
+ old + incr)));
+ return old;
+ }
+# define AO_HAVE_XSIZE_fetch_and_add_release
+#endif
+
+#if defined(AO_HAVE_XSIZE_compare_and_swap) \
+ && !defined(AO_HAVE_XSIZE_fetch_and_add)
+ AO_INLINE XCTYPE
+ AO_XSIZE_fetch_and_add(volatile XCTYPE *addr, XCTYPE incr)
+ {
+ XCTYPE old;
+
+ do
+ {
+ old = *(XCTYPE *)addr;
+ }
+ while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap(addr, old,
+ old + incr)));
+ return old;
+ }
+# define AO_HAVE_XSIZE_fetch_and_add
+#endif
+
+#if defined(AO_HAVE_XSIZE_fetch_and_add_full)
+# if !defined(AO_HAVE_XSIZE_fetch_and_add_release)
+# define AO_XSIZE_fetch_and_add_release(addr, val) \
+ AO_XSIZE_fetch_and_add_full(addr, val)
+# define AO_HAVE_XSIZE_fetch_and_add_release
+# endif
+# if !defined(AO_HAVE_XSIZE_fetch_and_add_acquire)
+# define AO_XSIZE_fetch_and_add_acquire(addr, val) \
+ AO_XSIZE_fetch_and_add_full(addr, val)
+# define AO_HAVE_XSIZE_fetch_and_add_acquire
+# endif
+# if !defined(AO_HAVE_XSIZE_fetch_and_add_write)
+# define AO_XSIZE_fetch_and_add_write(addr, val) \
+ AO_XSIZE_fetch_and_add_full(addr, val)
+# define AO_HAVE_XSIZE_fetch_and_add_write
+# endif
+# if !defined(AO_HAVE_XSIZE_fetch_and_add_read)
+# define AO_XSIZE_fetch_and_add_read(addr, val) \
+ AO_XSIZE_fetch_and_add_full(addr, val)
+# define AO_HAVE_XSIZE_fetch_and_add_read
+# endif
+#endif /* AO_HAVE_XSIZE_fetch_and_add_full */
+
+#if defined(AO_HAVE_XSIZE_fetch_and_add) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_XSIZE_fetch_and_add_acquire)
+ AO_INLINE XCTYPE
+ AO_XSIZE_fetch_and_add_acquire(volatile XCTYPE *addr, XCTYPE incr)
+ {
+ XCTYPE result = AO_XSIZE_fetch_and_add(addr, incr);
+ AO_nop_full();
+ return result;
+ }
+# define AO_HAVE_XSIZE_fetch_and_add_acquire
+#endif
+#if defined(AO_HAVE_XSIZE_fetch_and_add) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_XSIZE_fetch_and_add_release)
+# define AO_XSIZE_fetch_and_add_release(addr, incr) \
+ (AO_nop_full(), AO_XSIZE_fetch_and_add(addr, incr))
+# define AO_HAVE_XSIZE_fetch_and_add_release
+#endif
+
+#if !defined(AO_HAVE_XSIZE_fetch_and_add) \
+ && defined(AO_HAVE_XSIZE_fetch_and_add_release)
+# define AO_XSIZE_fetch_and_add(addr, val) \
+ AO_XSIZE_fetch_and_add_release(addr, val)
+# define AO_HAVE_XSIZE_fetch_and_add
+#endif
+#if !defined(AO_HAVE_XSIZE_fetch_and_add) \
+ && defined(AO_HAVE_XSIZE_fetch_and_add_acquire)
+# define AO_XSIZE_fetch_and_add(addr, val) \
+ AO_XSIZE_fetch_and_add_acquire(addr, val)
+# define AO_HAVE_XSIZE_fetch_and_add
+#endif
+#if !defined(AO_HAVE_XSIZE_fetch_and_add) \
+ && defined(AO_HAVE_XSIZE_fetch_and_add_write)
+# define AO_XSIZE_fetch_and_add(addr, val) \
+ AO_XSIZE_fetch_and_add_write(addr, val)
+# define AO_HAVE_XSIZE_fetch_and_add
+#endif
+#if !defined(AO_HAVE_XSIZE_fetch_and_add) \
+ && defined(AO_HAVE_XSIZE_fetch_and_add_read)
+# define AO_XSIZE_fetch_and_add(addr, val) \
+ AO_XSIZE_fetch_and_add_read(addr, val)
+# define AO_HAVE_XSIZE_fetch_and_add
+#endif
+
+#if defined(AO_HAVE_XSIZE_fetch_and_add_acquire) \
+ && defined(AO_HAVE_nop_full) && !defined(AO_HAVE_XSIZE_fetch_and_add_full)
+# define AO_XSIZE_fetch_and_add_full(addr, val) \
+ (AO_nop_full(), AO_XSIZE_fetch_and_add_acquire(addr, val))
+# define AO_HAVE_XSIZE_fetch_and_add_full
+#endif
+
+#if !defined(AO_HAVE_XSIZE_fetch_and_add_release_write) \
+ && defined(AO_HAVE_XSIZE_fetch_and_add_write)
+# define AO_XSIZE_fetch_and_add_release_write(addr, val) \
+ AO_XSIZE_fetch_and_add_write(addr, val)
+# define AO_HAVE_XSIZE_fetch_and_add_release_write
+#endif
+#if !defined(AO_HAVE_XSIZE_fetch_and_add_release_write) \
+ && defined(AO_HAVE_XSIZE_fetch_and_add_release)
+# define AO_XSIZE_fetch_and_add_release_write(addr, val) \
+ AO_XSIZE_fetch_and_add_release(addr, val)
+# define AO_HAVE_XSIZE_fetch_and_add_release_write
+#endif
+
+#if !defined(AO_HAVE_XSIZE_fetch_and_add_acquire_read) \
+ && defined(AO_HAVE_XSIZE_fetch_and_add_read)
+# define AO_XSIZE_fetch_and_add_acquire_read(addr, val) \
+ AO_XSIZE_fetch_and_add_read(addr, val)
+# define AO_HAVE_XSIZE_fetch_and_add_acquire_read
+#endif
+#if !defined(AO_HAVE_XSIZE_fetch_and_add_acquire_read) \
+ && defined(AO_HAVE_XSIZE_fetch_and_add_acquire)
+# define AO_XSIZE_fetch_and_add_acquire_read(addr, val) \
+ AO_XSIZE_fetch_and_add_acquire(addr, val)
+# define AO_HAVE_XSIZE_fetch_and_add_acquire_read
+#endif
+
+#ifdef AO_NO_DD_ORDERING
+# if defined(AO_HAVE_XSIZE_fetch_and_add_acquire_read)
+# define AO_XSIZE_fetch_and_add_dd_acquire_read(addr, val) \
+ AO_XSIZE_fetch_and_add_acquire_read(addr, val)
+# define AO_HAVE_XSIZE_fetch_and_add_dd_acquire_read
+# endif
+#else
+# if defined(AO_HAVE_XSIZE_fetch_and_add)
+# define AO_XSIZE_fetch_and_add_dd_acquire_read(addr, val) \
+ AO_XSIZE_fetch_and_add(addr, val)
+# define AO_HAVE_XSIZE_fetch_and_add_dd_acquire_read
+# endif
+#endif /* !AO_NO_DD_ORDERING */
+
+/* XSIZE_fetch_and_add1 */
+#if defined(AO_HAVE_XSIZE_fetch_and_add_full) \
+ && !defined(AO_HAVE_XSIZE_fetch_and_add1_full)
+# define AO_XSIZE_fetch_and_add1_full(addr) \
+ AO_XSIZE_fetch_and_add_full(addr, 1)
+# define AO_HAVE_XSIZE_fetch_and_add1_full
+#endif
+#if defined(AO_HAVE_XSIZE_fetch_and_add_release) \
+ && !defined(AO_HAVE_XSIZE_fetch_and_add1_release)
+# define AO_XSIZE_fetch_and_add1_release(addr) \
+ AO_XSIZE_fetch_and_add_release(addr, 1)
+# define AO_HAVE_XSIZE_fetch_and_add1_release
+#endif
+#if defined(AO_HAVE_XSIZE_fetch_and_add_acquire) \
+ && !defined(AO_HAVE_XSIZE_fetch_and_add1_acquire)
+# define AO_XSIZE_fetch_and_add1_acquire(addr) \
+ AO_XSIZE_fetch_and_add_acquire(addr, 1)
+# define AO_HAVE_XSIZE_fetch_and_add1_acquire
+#endif
+#if defined(AO_HAVE_XSIZE_fetch_and_add_write) \
+ && !defined(AO_HAVE_XSIZE_fetch_and_add1_write)
+# define AO_XSIZE_fetch_and_add1_write(addr) \
+ AO_XSIZE_fetch_and_add_write(addr, 1)
+# define AO_HAVE_XSIZE_fetch_and_add1_write
+#endif
+#if defined(AO_HAVE_XSIZE_fetch_and_add_read) \
+ && !defined(AO_HAVE_XSIZE_fetch_and_add1_read)
+# define AO_XSIZE_fetch_and_add1_read(addr) \
+ AO_XSIZE_fetch_and_add_read(addr, 1)
+# define AO_HAVE_XSIZE_fetch_and_add1_read
+#endif
+#if defined(AO_HAVE_XSIZE_fetch_and_add_release_write) \
+ && !defined(AO_HAVE_XSIZE_fetch_and_add1_release_write)
+# define AO_XSIZE_fetch_and_add1_release_write(addr) \
+ AO_XSIZE_fetch_and_add_release_write(addr, 1)
+# define AO_HAVE_XSIZE_fetch_and_add1_release_write
+#endif
+#if defined(AO_HAVE_XSIZE_fetch_and_add_acquire_read) \
+ && !defined(AO_HAVE_XSIZE_fetch_and_add1_acquire_read)
+# define AO_XSIZE_fetch_and_add1_acquire_read(addr) \
+ AO_XSIZE_fetch_and_add_acquire_read(addr, 1)
+# define AO_HAVE_XSIZE_fetch_and_add1_acquire_read
+#endif
+#if defined(AO_HAVE_XSIZE_fetch_and_add) \
+ && !defined(AO_HAVE_XSIZE_fetch_and_add1)
+# define AO_XSIZE_fetch_and_add1(addr) AO_XSIZE_fetch_and_add(addr, 1)
+# define AO_HAVE_XSIZE_fetch_and_add1
+#endif
+
+#if defined(AO_HAVE_XSIZE_fetch_and_add1_full)
+# if !defined(AO_HAVE_XSIZE_fetch_and_add1_release)
+# define AO_XSIZE_fetch_and_add1_release(addr) \
+ AO_XSIZE_fetch_and_add1_full(addr)
+# define AO_HAVE_XSIZE_fetch_and_add1_release
+# endif
+# if !defined(AO_HAVE_XSIZE_fetch_and_add1_acquire)
+# define AO_XSIZE_fetch_and_add1_acquire(addr) \
+ AO_XSIZE_fetch_and_add1_full(addr)
+# define AO_HAVE_XSIZE_fetch_and_add1_acquire
+# endif
+# if !defined(AO_HAVE_XSIZE_fetch_and_add1_write)
+# define AO_XSIZE_fetch_and_add1_write(addr) \
+ AO_XSIZE_fetch_and_add1_full(addr)
+# define AO_HAVE_XSIZE_fetch_and_add1_write
+# endif
+# if !defined(AO_HAVE_XSIZE_fetch_and_add1_read)
+# define AO_XSIZE_fetch_and_add1_read(addr) \
+ AO_XSIZE_fetch_and_add1_full(addr)
+# define AO_HAVE_XSIZE_fetch_and_add1_read
+# endif
+#endif /* AO_HAVE_XSIZE_fetch_and_add1_full */
+
+#if !defined(AO_HAVE_XSIZE_fetch_and_add1) \
+ && defined(AO_HAVE_XSIZE_fetch_and_add1_release)
+# define AO_XSIZE_fetch_and_add1(addr) AO_XSIZE_fetch_and_add1_release(addr)
+# define AO_HAVE_XSIZE_fetch_and_add1
+#endif
+#if !defined(AO_HAVE_XSIZE_fetch_and_add1) \
+ && defined(AO_HAVE_XSIZE_fetch_and_add1_acquire)
+# define AO_XSIZE_fetch_and_add1(addr) AO_XSIZE_fetch_and_add1_acquire(addr)
+# define AO_HAVE_XSIZE_fetch_and_add1
+#endif
+#if !defined(AO_HAVE_XSIZE_fetch_and_add1) \
+ && defined(AO_HAVE_XSIZE_fetch_and_add1_write)
+# define AO_XSIZE_fetch_and_add1(addr) AO_XSIZE_fetch_and_add1_write(addr)
+# define AO_HAVE_XSIZE_fetch_and_add1
+#endif
+#if !defined(AO_HAVE_XSIZE_fetch_and_add1) \
+ && defined(AO_HAVE_XSIZE_fetch_and_add1_read)
+# define AO_XSIZE_fetch_and_add1(addr) AO_XSIZE_fetch_and_add1_read(addr)
+# define AO_HAVE_XSIZE_fetch_and_add1
+#endif
+
+#if defined(AO_HAVE_XSIZE_fetch_and_add1_acquire) \
+ && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_XSIZE_fetch_and_add1_full)
+# define AO_XSIZE_fetch_and_add1_full(addr) \
+ (AO_nop_full(), AO_XSIZE_fetch_and_add1_acquire(addr))
+# define AO_HAVE_XSIZE_fetch_and_add1_full
+#endif
+
+#if !defined(AO_HAVE_XSIZE_fetch_and_add1_release_write) \
+ && defined(AO_HAVE_XSIZE_fetch_and_add1_write)
+# define AO_XSIZE_fetch_and_add1_release_write(addr) \
+ AO_XSIZE_fetch_and_add1_write(addr)
+# define AO_HAVE_XSIZE_fetch_and_add1_release_write
+#endif
+#if !defined(AO_HAVE_XSIZE_fetch_and_add1_release_write) \
+ && defined(AO_HAVE_XSIZE_fetch_and_add1_release)
+# define AO_XSIZE_fetch_and_add1_release_write(addr) \
+ AO_XSIZE_fetch_and_add1_release(addr)
+# define AO_HAVE_XSIZE_fetch_and_add1_release_write
+#endif
+#if !defined(AO_HAVE_XSIZE_fetch_and_add1_acquire_read) \
+ && defined(AO_HAVE_XSIZE_fetch_and_add1_read)
+# define AO_XSIZE_fetch_and_add1_acquire_read(addr) \
+ AO_XSIZE_fetch_and_add1_read(addr)
+# define AO_HAVE_XSIZE_fetch_and_add1_acquire_read
+#endif
+#if !defined(AO_HAVE_XSIZE_fetch_and_add1_acquire_read) \
+ && defined(AO_HAVE_XSIZE_fetch_and_add1_acquire)
+# define AO_XSIZE_fetch_and_add1_acquire_read(addr) \
+ AO_XSIZE_fetch_and_add1_acquire(addr)
+# define AO_HAVE_XSIZE_fetch_and_add1_acquire_read
+#endif
+
+#ifdef AO_NO_DD_ORDERING
+# if defined(AO_HAVE_XSIZE_fetch_and_add1_acquire_read)
+# define AO_XSIZE_fetch_and_add1_dd_acquire_read(addr) \
+ AO_XSIZE_fetch_and_add1_acquire_read(addr)
+# define AO_HAVE_XSIZE_fetch_and_add1_dd_acquire_read
+# endif
+#else
+# if defined(AO_HAVE_XSIZE_fetch_and_add1)
+# define AO_XSIZE_fetch_and_add1_dd_acquire_read(addr) \
+ AO_XSIZE_fetch_and_add1(addr)
+# define AO_HAVE_XSIZE_fetch_and_add1_dd_acquire_read
+# endif
+#endif /* !AO_NO_DD_ORDERING */
+
+/* XSIZE_fetch_and_sub1 */
+#if defined(AO_HAVE_XSIZE_fetch_and_add_full) \
+ && !defined(AO_HAVE_XSIZE_fetch_and_sub1_full)
+# define AO_XSIZE_fetch_and_sub1_full(addr) \
+ AO_XSIZE_fetch_and_add_full(addr, (XCTYPE)(-1))
+# define AO_HAVE_XSIZE_fetch_and_sub1_full
+#endif
+#if defined(AO_HAVE_XSIZE_fetch_and_add_release) \
+ && !defined(AO_HAVE_XSIZE_fetch_and_sub1_release)
+# define AO_XSIZE_fetch_and_sub1_release(addr) \
+ AO_XSIZE_fetch_and_add_release(addr, (XCTYPE)(-1))
+# define AO_HAVE_XSIZE_fetch_and_sub1_release
+#endif
+#if defined(AO_HAVE_XSIZE_fetch_and_add_acquire) \
+ && !defined(AO_HAVE_XSIZE_fetch_and_sub1_acquire)
+# define AO_XSIZE_fetch_and_sub1_acquire(addr) \
+ AO_XSIZE_fetch_and_add_acquire(addr, (XCTYPE)(-1))
+# define AO_HAVE_XSIZE_fetch_and_sub1_acquire
+#endif
+#if defined(AO_HAVE_XSIZE_fetch_and_add_write) \
+ && !defined(AO_HAVE_XSIZE_fetch_and_sub1_write)
+# define AO_XSIZE_fetch_and_sub1_write(addr) \
+ AO_XSIZE_fetch_and_add_write(addr, (XCTYPE)(-1))
+# define AO_HAVE_XSIZE_fetch_and_sub1_write
+#endif
+#if defined(AO_HAVE_XSIZE_fetch_and_add_read) \
+ && !defined(AO_HAVE_XSIZE_fetch_and_sub1_read)
+# define AO_XSIZE_fetch_and_sub1_read(addr) \
+ AO_XSIZE_fetch_and_add_read(addr, (XCTYPE)(-1))
+# define AO_HAVE_XSIZE_fetch_and_sub1_read
+#endif
+#if defined(AO_HAVE_XSIZE_fetch_and_add_release_write) \
+ && !defined(AO_HAVE_XSIZE_fetch_and_sub1_release_write)
+# define AO_XSIZE_fetch_and_sub1_release_write(addr) \
+ AO_XSIZE_fetch_and_add_release_write(addr, (XCTYPE)(-1))
+# define AO_HAVE_XSIZE_fetch_and_sub1_release_write
+#endif
+#if defined(AO_HAVE_XSIZE_fetch_and_add_acquire_read) \
+ && !defined(AO_HAVE_XSIZE_fetch_and_sub1_acquire_read)
+# define AO_XSIZE_fetch_and_sub1_acquire_read(addr) \
+ AO_XSIZE_fetch_and_add_acquire_read(addr, (XCTYPE)(-1))
+# define AO_HAVE_XSIZE_fetch_and_sub1_acquire_read
+#endif
+#if defined(AO_HAVE_XSIZE_fetch_and_add) \
+ && !defined(AO_HAVE_XSIZE_fetch_and_sub1)
+# define AO_XSIZE_fetch_and_sub1(addr) \
+ AO_XSIZE_fetch_and_add(addr, (XCTYPE)(-1))
+# define AO_HAVE_XSIZE_fetch_and_sub1
+#endif
+
+#if defined(AO_HAVE_XSIZE_fetch_and_sub1_full)
+# if !defined(AO_HAVE_XSIZE_fetch_and_sub1_release)
+# define AO_XSIZE_fetch_and_sub1_release(addr) \
+ AO_XSIZE_fetch_and_sub1_full(addr)
+# define AO_HAVE_XSIZE_fetch_and_sub1_release
+# endif
+# if !defined(AO_HAVE_XSIZE_fetch_and_sub1_acquire)
+# define AO_XSIZE_fetch_and_sub1_acquire(addr) \
+ AO_XSIZE_fetch_and_sub1_full(addr)
+# define AO_HAVE_XSIZE_fetch_and_sub1_acquire
+# endif
+# if !defined(AO_HAVE_XSIZE_fetch_and_sub1_write)
+# define AO_XSIZE_fetch_and_sub1_write(addr) \
+ AO_XSIZE_fetch_and_sub1_full(addr)
+# define AO_HAVE_XSIZE_fetch_and_sub1_write
+# endif
+# if !defined(AO_HAVE_XSIZE_fetch_and_sub1_read)
+# define AO_XSIZE_fetch_and_sub1_read(addr) \
+ AO_XSIZE_fetch_and_sub1_full(addr)
+# define AO_HAVE_XSIZE_fetch_and_sub1_read
+# endif
+#endif /* AO_HAVE_XSIZE_fetch_and_sub1_full */
+
+#if !defined(AO_HAVE_XSIZE_fetch_and_sub1) \
+ && defined(AO_HAVE_XSIZE_fetch_and_sub1_release)
+# define AO_XSIZE_fetch_and_sub1(addr) AO_XSIZE_fetch_and_sub1_release(addr)
+# define AO_HAVE_XSIZE_fetch_and_sub1
+#endif
+#if !defined(AO_HAVE_XSIZE_fetch_and_sub1) \
+ && defined(AO_HAVE_XSIZE_fetch_and_sub1_acquire)
+# define AO_XSIZE_fetch_and_sub1(addr) AO_XSIZE_fetch_and_sub1_acquire(addr)
+# define AO_HAVE_XSIZE_fetch_and_sub1
+#endif
+#if !defined(AO_HAVE_XSIZE_fetch_and_sub1) \
+ && defined(AO_HAVE_XSIZE_fetch_and_sub1_write)
+# define AO_XSIZE_fetch_and_sub1(addr) AO_XSIZE_fetch_and_sub1_write(addr)
+# define AO_HAVE_XSIZE_fetch_and_sub1
+#endif
+#if !defined(AO_HAVE_XSIZE_fetch_and_sub1) \
+ && defined(AO_HAVE_XSIZE_fetch_and_sub1_read)
+# define AO_XSIZE_fetch_and_sub1(addr) AO_XSIZE_fetch_and_sub1_read(addr)
+# define AO_HAVE_XSIZE_fetch_and_sub1
+#endif
+
+#if defined(AO_HAVE_XSIZE_fetch_and_sub1_acquire) \
+ && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_XSIZE_fetch_and_sub1_full)
+# define AO_XSIZE_fetch_and_sub1_full(addr) \
+ (AO_nop_full(), AO_XSIZE_fetch_and_sub1_acquire(addr))
+# define AO_HAVE_XSIZE_fetch_and_sub1_full
+#endif
+
+#if !defined(AO_HAVE_XSIZE_fetch_and_sub1_release_write) \
+ && defined(AO_HAVE_XSIZE_fetch_and_sub1_write)
+# define AO_XSIZE_fetch_and_sub1_release_write(addr) \
+ AO_XSIZE_fetch_and_sub1_write(addr)
+# define AO_HAVE_XSIZE_fetch_and_sub1_release_write
+#endif
+#if !defined(AO_HAVE_XSIZE_fetch_and_sub1_release_write) \
+ && defined(AO_HAVE_XSIZE_fetch_and_sub1_release)
+# define AO_XSIZE_fetch_and_sub1_release_write(addr) \
+ AO_XSIZE_fetch_and_sub1_release(addr)
+# define AO_HAVE_XSIZE_fetch_and_sub1_release_write
+#endif
+#if !defined(AO_HAVE_XSIZE_fetch_and_sub1_acquire_read) \
+ && defined(AO_HAVE_XSIZE_fetch_and_sub1_read)
+# define AO_XSIZE_fetch_and_sub1_acquire_read(addr) \
+ AO_XSIZE_fetch_and_sub1_read(addr)
+# define AO_HAVE_XSIZE_fetch_and_sub1_acquire_read
+#endif
+#if !defined(AO_HAVE_XSIZE_fetch_and_sub1_acquire_read) \
+ && defined(AO_HAVE_XSIZE_fetch_and_sub1_acquire)
+# define AO_XSIZE_fetch_and_sub1_acquire_read(addr) \
+ AO_XSIZE_fetch_and_sub1_acquire(addr)
+# define AO_HAVE_XSIZE_fetch_and_sub1_acquire_read
+#endif
+
+#ifdef AO_NO_DD_ORDERING
+# if defined(AO_HAVE_XSIZE_fetch_and_sub1_acquire_read)
+# define AO_XSIZE_fetch_and_sub1_dd_acquire_read(addr) \
+ AO_XSIZE_fetch_and_sub1_acquire_read(addr)
+# define AO_HAVE_XSIZE_fetch_and_sub1_dd_acquire_read
+# endif
+#else
+# if defined(AO_HAVE_XSIZE_fetch_and_sub1)
+# define AO_XSIZE_fetch_and_sub1_dd_acquire_read(addr) \
+ AO_XSIZE_fetch_and_sub1(addr)
+# define AO_HAVE_XSIZE_fetch_and_sub1_dd_acquire_read
+# endif
+#endif /* !AO_NO_DD_ORDERING */
+
+/* XSIZE_and */
+#if defined(AO_HAVE_XSIZE_compare_and_swap_full) \
+ && !defined(AO_HAVE_XSIZE_and_full)
+ AO_INLINE void
+ AO_XSIZE_and_full(volatile XCTYPE *addr, XCTYPE value)
+ {
+ XCTYPE old;
+
+ do
+ {
+ old = *(XCTYPE *)addr;
+ }
+ while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap_full(addr, old,
+ old & value)));
+ }
+# define AO_HAVE_XSIZE_and_full
+#endif
+
+#if defined(AO_HAVE_XSIZE_and_full)
+# if !defined(AO_HAVE_XSIZE_and_release)
+# define AO_XSIZE_and_release(addr, val) AO_XSIZE_and_full(addr, val)
+# define AO_HAVE_XSIZE_and_release
+# endif
+# if !defined(AO_HAVE_XSIZE_and_acquire)
+# define AO_XSIZE_and_acquire(addr, val) AO_XSIZE_and_full(addr, val)
+# define AO_HAVE_XSIZE_and_acquire
+# endif
+# if !defined(AO_HAVE_XSIZE_and_write)
+# define AO_XSIZE_and_write(addr, val) AO_XSIZE_and_full(addr, val)
+# define AO_HAVE_XSIZE_and_write
+# endif
+# if !defined(AO_HAVE_XSIZE_and_read)
+# define AO_XSIZE_and_read(addr, val) AO_XSIZE_and_full(addr, val)
+# define AO_HAVE_XSIZE_and_read
+# endif
+#endif /* AO_HAVE_XSIZE_and_full */
+
+#if !defined(AO_HAVE_XSIZE_and) && defined(AO_HAVE_XSIZE_and_release)
+# define AO_XSIZE_and(addr, val) AO_XSIZE_and_release(addr, val)
+# define AO_HAVE_XSIZE_and
+#endif
+#if !defined(AO_HAVE_XSIZE_and) && defined(AO_HAVE_XSIZE_and_acquire)
+# define AO_XSIZE_and(addr, val) AO_XSIZE_and_acquire(addr, val)
+# define AO_HAVE_XSIZE_and
+#endif
+#if !defined(AO_HAVE_XSIZE_and) && defined(AO_HAVE_XSIZE_and_write)
+# define AO_XSIZE_and(addr, val) AO_XSIZE_and_write(addr, val)
+# define AO_HAVE_XSIZE_and
+#endif
+#if !defined(AO_HAVE_XSIZE_and) && defined(AO_HAVE_XSIZE_and_read)
+# define AO_XSIZE_and(addr, val) AO_XSIZE_and_read(addr, val)
+# define AO_HAVE_XSIZE_and
+#endif
+
+#if defined(AO_HAVE_XSIZE_and_acquire) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_XSIZE_and_full)
+# define AO_XSIZE_and_full(addr, val) \
+ (AO_nop_full(), AO_XSIZE_and_acquire(addr, val))
+# define AO_HAVE_XSIZE_and_full
+#endif
+
+#if !defined(AO_HAVE_XSIZE_and_release_write) \
+ && defined(AO_HAVE_XSIZE_and_write)
+# define AO_XSIZE_and_release_write(addr, val) AO_XSIZE_and_write(addr, val)
+# define AO_HAVE_XSIZE_and_release_write
+#endif
+#if !defined(AO_HAVE_XSIZE_and_release_write) \
+ && defined(AO_HAVE_XSIZE_and_release)
+# define AO_XSIZE_and_release_write(addr, val) AO_XSIZE_and_release(addr, val)
+# define AO_HAVE_XSIZE_and_release_write
+#endif
+#if !defined(AO_HAVE_XSIZE_and_acquire_read) \
+ && defined(AO_HAVE_XSIZE_and_read)
+# define AO_XSIZE_and_acquire_read(addr, val) AO_XSIZE_and_read(addr, val)
+# define AO_HAVE_XSIZE_and_acquire_read
+#endif
+#if !defined(AO_HAVE_XSIZE_and_acquire_read) \
+ && defined(AO_HAVE_XSIZE_and_acquire)
+# define AO_XSIZE_and_acquire_read(addr, val) AO_XSIZE_and_acquire(addr, val)
+# define AO_HAVE_XSIZE_and_acquire_read
+#endif
+
+/* XSIZE_or */
+#if defined(AO_HAVE_XSIZE_compare_and_swap_full) \
+ && !defined(AO_HAVE_XSIZE_or_full)
+ AO_INLINE void
+ AO_XSIZE_or_full(volatile XCTYPE *addr, XCTYPE value)
+ {
+ XCTYPE old;
+
+ do
+ {
+ old = *(XCTYPE *)addr;
+ }
+ while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap_full(addr, old,
+ old | value)));
+ }
+# define AO_HAVE_XSIZE_or_full
+#endif
+
+#if defined(AO_HAVE_XSIZE_or_full)
+# if !defined(AO_HAVE_XSIZE_or_release)
+# define AO_XSIZE_or_release(addr, val) AO_XSIZE_or_full(addr, val)
+# define AO_HAVE_XSIZE_or_release
+# endif
+# if !defined(AO_HAVE_XSIZE_or_acquire)
+# define AO_XSIZE_or_acquire(addr, val) AO_XSIZE_or_full(addr, val)
+# define AO_HAVE_XSIZE_or_acquire
+# endif
+# if !defined(AO_HAVE_XSIZE_or_write)
+# define AO_XSIZE_or_write(addr, val) AO_XSIZE_or_full(addr, val)
+# define AO_HAVE_XSIZE_or_write
+# endif
+# if !defined(AO_HAVE_XSIZE_or_read)
+# define AO_XSIZE_or_read(addr, val) AO_XSIZE_or_full(addr, val)
+# define AO_HAVE_XSIZE_or_read
+# endif
+#endif /* AO_HAVE_XSIZE_or_full */
+
+#if !defined(AO_HAVE_XSIZE_or) && defined(AO_HAVE_XSIZE_or_release)
+# define AO_XSIZE_or(addr, val) AO_XSIZE_or_release(addr, val)
+# define AO_HAVE_XSIZE_or
+#endif
+#if !defined(AO_HAVE_XSIZE_or) && defined(AO_HAVE_XSIZE_or_acquire)
+# define AO_XSIZE_or(addr, val) AO_XSIZE_or_acquire(addr, val)
+# define AO_HAVE_XSIZE_or
+#endif
+#if !defined(AO_HAVE_XSIZE_or) && defined(AO_HAVE_XSIZE_or_write)
+# define AO_XSIZE_or(addr, val) AO_XSIZE_or_write(addr, val)
+# define AO_HAVE_XSIZE_or
+#endif
+#if !defined(AO_HAVE_XSIZE_or) && defined(AO_HAVE_XSIZE_or_read)
+# define AO_XSIZE_or(addr, val) AO_XSIZE_or_read(addr, val)
+# define AO_HAVE_XSIZE_or
+#endif
+
+#if defined(AO_HAVE_XSIZE_or_acquire) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_XSIZE_or_full)
+# define AO_XSIZE_or_full(addr, val) \
+ (AO_nop_full(), AO_XSIZE_or_acquire(addr, val))
+# define AO_HAVE_XSIZE_or_full
+#endif
+
+#if !defined(AO_HAVE_XSIZE_or_release_write) \
+ && defined(AO_HAVE_XSIZE_or_write)
+# define AO_XSIZE_or_release_write(addr, val) AO_XSIZE_or_write(addr, val)
+# define AO_HAVE_XSIZE_or_release_write
+#endif
+#if !defined(AO_HAVE_XSIZE_or_release_write) \
+ && defined(AO_HAVE_XSIZE_or_release)
+# define AO_XSIZE_or_release_write(addr, val) AO_XSIZE_or_release(addr, val)
+# define AO_HAVE_XSIZE_or_release_write
+#endif
+#if !defined(AO_HAVE_XSIZE_or_acquire_read) && defined(AO_HAVE_XSIZE_or_read)
+# define AO_XSIZE_or_acquire_read(addr, val) AO_XSIZE_or_read(addr, val)
+# define AO_HAVE_XSIZE_or_acquire_read
+#endif
+#if !defined(AO_HAVE_XSIZE_or_acquire_read) \
+ && defined(AO_HAVE_XSIZE_or_acquire)
+# define AO_XSIZE_or_acquire_read(addr, val) AO_XSIZE_or_acquire(addr, val)
+# define AO_HAVE_XSIZE_or_acquire_read
+#endif
+
+/* XSIZE_xor */
+#if defined(AO_HAVE_XSIZE_compare_and_swap_full) \
+ && !defined(AO_HAVE_XSIZE_xor_full)
+ AO_INLINE void
+ AO_XSIZE_xor_full(volatile XCTYPE *addr, XCTYPE value)
+ {
+ XCTYPE old;
+
+ do
+ {
+ old = *(XCTYPE *)addr;
+ }
+ while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap_full(addr, old,
+ old ^ value)));
+ }
+# define AO_HAVE_XSIZE_xor_full
+#endif
+
+#if defined(AO_HAVE_XSIZE_xor_full)
+# if !defined(AO_HAVE_XSIZE_xor_release)
+# define AO_XSIZE_xor_release(addr, val) AO_XSIZE_xor_full(addr, val)
+# define AO_HAVE_XSIZE_xor_release
+# endif
+# if !defined(AO_HAVE_XSIZE_xor_acquire)
+# define AO_XSIZE_xor_acquire(addr, val) AO_XSIZE_xor_full(addr, val)
+# define AO_HAVE_XSIZE_xor_acquire
+# endif
+# if !defined(AO_HAVE_XSIZE_xor_write)
+# define AO_XSIZE_xor_write(addr, val) AO_XSIZE_xor_full(addr, val)
+# define AO_HAVE_XSIZE_xor_write
+# endif
+# if !defined(AO_HAVE_XSIZE_xor_read)
+# define AO_XSIZE_xor_read(addr, val) AO_XSIZE_xor_full(addr, val)
+# define AO_HAVE_XSIZE_xor_read
+# endif
+#endif /* AO_HAVE_XSIZE_xor_full */
+
+#if !defined(AO_HAVE_XSIZE_xor) && defined(AO_HAVE_XSIZE_xor_release)
+# define AO_XSIZE_xor(addr, val) AO_XSIZE_xor_release(addr, val)
+# define AO_HAVE_XSIZE_xor
+#endif
+#if !defined(AO_HAVE_XSIZE_xor) && defined(AO_HAVE_XSIZE_xor_acquire)
+# define AO_XSIZE_xor(addr, val) AO_XSIZE_xor_acquire(addr, val)
+# define AO_HAVE_XSIZE_xor
+#endif
+#if !defined(AO_HAVE_XSIZE_xor) && defined(AO_HAVE_XSIZE_xor_write)
+# define AO_XSIZE_xor(addr, val) AO_XSIZE_xor_write(addr, val)
+# define AO_HAVE_XSIZE_xor
+#endif
+#if !defined(AO_HAVE_XSIZE_xor) && defined(AO_HAVE_XSIZE_xor_read)
+# define AO_XSIZE_xor(addr, val) AO_XSIZE_xor_read(addr, val)
+# define AO_HAVE_XSIZE_xor
+#endif
+
+#if defined(AO_HAVE_XSIZE_xor_acquire) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_XSIZE_xor_full)
+# define AO_XSIZE_xor_full(addr, val) \
+ (AO_nop_full(), AO_XSIZE_xor_acquire(addr, val))
+# define AO_HAVE_XSIZE_xor_full
+#endif
+
+#if !defined(AO_HAVE_XSIZE_xor_release_write) \
+ && defined(AO_HAVE_XSIZE_xor_write)
+# define AO_XSIZE_xor_release_write(addr, val) AO_XSIZE_xor_write(addr, val)
+# define AO_HAVE_XSIZE_xor_release_write
+#endif
+#if !defined(AO_HAVE_XSIZE_xor_release_write) \
+ && defined(AO_HAVE_XSIZE_xor_release)
+# define AO_XSIZE_xor_release_write(addr, val) AO_XSIZE_xor_release(addr, val)
+# define AO_HAVE_XSIZE_xor_release_write
+#endif
+#if !defined(AO_HAVE_XSIZE_xor_acquire_read) \
+ && defined(AO_HAVE_XSIZE_xor_read)
+# define AO_XSIZE_xor_acquire_read(addr, val) AO_XSIZE_xor_read(addr, val)
+# define AO_HAVE_XSIZE_xor_acquire_read
+#endif
+#if !defined(AO_HAVE_XSIZE_xor_acquire_read) \
+ && defined(AO_HAVE_XSIZE_xor_acquire)
+# define AO_XSIZE_xor_acquire_read(addr, val) AO_XSIZE_xor_acquire(addr, val)
+# define AO_HAVE_XSIZE_xor_acquire_read
+#endif
+
+/* XSIZE_and/or/xor_dd_acquire_read are meaningless. */
--- /dev/null
+/*
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* char_fetch_compare_and_swap */
+#if defined(AO_HAVE_char_fetch_compare_and_swap) \
+ && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_char_fetch_compare_and_swap_acquire)
+ AO_INLINE unsigned/**/char
+ AO_char_fetch_compare_and_swap_acquire(volatile unsigned/**/char *addr,
+ unsigned/**/char old_val, unsigned/**/char new_val)
+ {
+ unsigned/**/char result = AO_char_fetch_compare_and_swap(addr, old_val, new_val);
+ AO_nop_full();
+ return result;
+ }
+# define AO_HAVE_char_fetch_compare_and_swap_acquire
+#endif
+#if defined(AO_HAVE_char_fetch_compare_and_swap) \
+ && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_char_fetch_compare_and_swap_release)
+# define AO_char_fetch_compare_and_swap_release(addr, old_val, new_val) \
+ (AO_nop_full(), \
+ AO_char_fetch_compare_and_swap(addr, old_val, new_val))
+# define AO_HAVE_char_fetch_compare_and_swap_release
+#endif
+#if defined(AO_HAVE_char_fetch_compare_and_swap_full)
+# if !defined(AO_HAVE_char_fetch_compare_and_swap_release)
+# define AO_char_fetch_compare_and_swap_release(addr, old_val, new_val) \
+ AO_char_fetch_compare_and_swap_full(addr, old_val, new_val)
+# define AO_HAVE_char_fetch_compare_and_swap_release
+# endif
+# if !defined(AO_HAVE_char_fetch_compare_and_swap_acquire)
+# define AO_char_fetch_compare_and_swap_acquire(addr, old_val, new_val) \
+ AO_char_fetch_compare_and_swap_full(addr, old_val, new_val)
+# define AO_HAVE_char_fetch_compare_and_swap_acquire
+# endif
+# if !defined(AO_HAVE_char_fetch_compare_and_swap_write)
+# define AO_char_fetch_compare_and_swap_write(addr, old_val, new_val) \
+ AO_char_fetch_compare_and_swap_full(addr, old_val, new_val)
+# define AO_HAVE_char_fetch_compare_and_swap_write
+# endif
+# if !defined(AO_HAVE_char_fetch_compare_and_swap_read)
+# define AO_char_fetch_compare_and_swap_read(addr, old_val, new_val) \
+ AO_char_fetch_compare_and_swap_full(addr, old_val, new_val)
+# define AO_HAVE_char_fetch_compare_and_swap_read
+# endif
+#endif /* AO_HAVE_char_fetch_compare_and_swap_full */
+
+#if !defined(AO_HAVE_char_fetch_compare_and_swap) \
+ && defined(AO_HAVE_char_fetch_compare_and_swap_release)
+# define AO_char_fetch_compare_and_swap(addr, old_val, new_val) \
+ AO_char_fetch_compare_and_swap_release(addr, old_val, new_val)
+# define AO_HAVE_char_fetch_compare_and_swap
+#endif
+#if !defined(AO_HAVE_char_fetch_compare_and_swap) \
+ && defined(AO_HAVE_char_fetch_compare_and_swap_acquire)
+# define AO_char_fetch_compare_and_swap(addr, old_val, new_val) \
+ AO_char_fetch_compare_and_swap_acquire(addr, old_val, new_val)
+# define AO_HAVE_char_fetch_compare_and_swap
+#endif
+#if !defined(AO_HAVE_char_fetch_compare_and_swap) \
+ && defined(AO_HAVE_char_fetch_compare_and_swap_write)
+# define AO_char_fetch_compare_and_swap(addr, old_val, new_val) \
+ AO_char_fetch_compare_and_swap_write(addr, old_val, new_val)
+# define AO_HAVE_char_fetch_compare_and_swap
+#endif
+#if !defined(AO_HAVE_char_fetch_compare_and_swap) \
+ && defined(AO_HAVE_char_fetch_compare_and_swap_read)
+# define AO_char_fetch_compare_and_swap(addr, old_val, new_val) \
+ AO_char_fetch_compare_and_swap_read(addr, old_val, new_val)
+# define AO_HAVE_char_fetch_compare_and_swap
+#endif
+
+#if defined(AO_HAVE_char_fetch_compare_and_swap_acquire) \
+ && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_char_fetch_compare_and_swap_full)
+# define AO_char_fetch_compare_and_swap_full(addr, old_val, new_val) \
+ (AO_nop_full(), \
+ AO_char_fetch_compare_and_swap_acquire(addr, old_val, new_val))
+# define AO_HAVE_char_fetch_compare_and_swap_full
+#endif
+
+#if !defined(AO_HAVE_char_fetch_compare_and_swap_release_write) \
+ && defined(AO_HAVE_char_fetch_compare_and_swap_write)
+# define AO_char_fetch_compare_and_swap_release_write(addr,old_val,new_val) \
+ AO_char_fetch_compare_and_swap_write(addr, old_val, new_val)
+# define AO_HAVE_char_fetch_compare_and_swap_release_write
+#endif
+#if !defined(AO_HAVE_char_fetch_compare_and_swap_release_write) \
+ && defined(AO_HAVE_char_fetch_compare_and_swap_release)
+# define AO_char_fetch_compare_and_swap_release_write(addr,old_val,new_val) \
+ AO_char_fetch_compare_and_swap_release(addr, old_val, new_val)
+# define AO_HAVE_char_fetch_compare_and_swap_release_write
+#endif
+#if !defined(AO_HAVE_char_fetch_compare_and_swap_acquire_read) \
+ && defined(AO_HAVE_char_fetch_compare_and_swap_read)
+# define AO_char_fetch_compare_and_swap_acquire_read(addr,old_val,new_val) \
+ AO_char_fetch_compare_and_swap_read(addr, old_val, new_val)
+# define AO_HAVE_char_fetch_compare_and_swap_acquire_read
+#endif
+#if !defined(AO_HAVE_char_fetch_compare_and_swap_acquire_read) \
+ && defined(AO_HAVE_char_fetch_compare_and_swap_acquire)
+# define AO_char_fetch_compare_and_swap_acquire_read(addr,old_val,new_val) \
+ AO_char_fetch_compare_and_swap_acquire(addr, old_val, new_val)
+# define AO_HAVE_char_fetch_compare_and_swap_acquire_read
+#endif
+
+#ifdef AO_NO_DD_ORDERING
+# if defined(AO_HAVE_char_fetch_compare_and_swap_acquire_read)
+# define AO_char_fetch_compare_and_swap_dd_acquire_read(addr,old_val,new_val) \
+ AO_char_fetch_compare_and_swap_acquire_read(addr, old_val, new_val)
+# define AO_HAVE_char_fetch_compare_and_swap_dd_acquire_read
+# endif
+#else
+# if defined(AO_HAVE_char_fetch_compare_and_swap)
+# define AO_char_fetch_compare_and_swap_dd_acquire_read(addr,old_val,new_val) \
+ AO_char_fetch_compare_and_swap(addr, old_val, new_val)
+# define AO_HAVE_char_fetch_compare_and_swap_dd_acquire_read
+# endif
+#endif /* !AO_NO_DD_ORDERING */
+
+/* char_compare_and_swap */
+#if defined(AO_HAVE_char_compare_and_swap) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_char_compare_and_swap_acquire)
+ AO_INLINE int
+ AO_char_compare_and_swap_acquire(volatile unsigned/**/char *addr, unsigned/**/char old,
+ unsigned/**/char new_val)
+ {
+ int result = AO_char_compare_and_swap(addr, old, new_val);
+ AO_nop_full();
+ return result;
+ }
+# define AO_HAVE_char_compare_and_swap_acquire
+#endif
+#if defined(AO_HAVE_char_compare_and_swap) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_char_compare_and_swap_release)
+# define AO_char_compare_and_swap_release(addr, old, new_val) \
+ (AO_nop_full(), AO_char_compare_and_swap(addr, old, new_val))
+# define AO_HAVE_char_compare_and_swap_release
+#endif
+#if defined(AO_HAVE_char_compare_and_swap_full)
+# if !defined(AO_HAVE_char_compare_and_swap_release)
+# define AO_char_compare_and_swap_release(addr, old, new_val) \
+ AO_char_compare_and_swap_full(addr, old, new_val)
+# define AO_HAVE_char_compare_and_swap_release
+# endif
+# if !defined(AO_HAVE_char_compare_and_swap_acquire)
+# define AO_char_compare_and_swap_acquire(addr, old, new_val) \
+ AO_char_compare_and_swap_full(addr, old, new_val)
+# define AO_HAVE_char_compare_and_swap_acquire
+# endif
+# if !defined(AO_HAVE_char_compare_and_swap_write)
+# define AO_char_compare_and_swap_write(addr, old, new_val) \
+ AO_char_compare_and_swap_full(addr, old, new_val)
+# define AO_HAVE_char_compare_and_swap_write
+# endif
+# if !defined(AO_HAVE_char_compare_and_swap_read)
+# define AO_char_compare_and_swap_read(addr, old, new_val) \
+ AO_char_compare_and_swap_full(addr, old, new_val)
+# define AO_HAVE_char_compare_and_swap_read
+# endif
+#endif /* AO_HAVE_char_compare_and_swap_full */
+
+#if !defined(AO_HAVE_char_compare_and_swap) \
+ && defined(AO_HAVE_char_compare_and_swap_release)
+# define AO_char_compare_and_swap(addr, old, new_val) \
+ AO_char_compare_and_swap_release(addr, old, new_val)
+# define AO_HAVE_char_compare_and_swap
+#endif
+#if !defined(AO_HAVE_char_compare_and_swap) \
+ && defined(AO_HAVE_char_compare_and_swap_acquire)
+# define AO_char_compare_and_swap(addr, old, new_val) \
+ AO_char_compare_and_swap_acquire(addr, old, new_val)
+# define AO_HAVE_char_compare_and_swap
+#endif
+#if !defined(AO_HAVE_char_compare_and_swap) \
+ && defined(AO_HAVE_char_compare_and_swap_write)
+# define AO_char_compare_and_swap(addr, old, new_val) \
+ AO_char_compare_and_swap_write(addr, old, new_val)
+# define AO_HAVE_char_compare_and_swap
+#endif
+#if !defined(AO_HAVE_char_compare_and_swap) \
+ && defined(AO_HAVE_char_compare_and_swap_read)
+# define AO_char_compare_and_swap(addr, old, new_val) \
+ AO_char_compare_and_swap_read(addr, old, new_val)
+# define AO_HAVE_char_compare_and_swap
+#endif
+
+#if defined(AO_HAVE_char_compare_and_swap_acquire) \
+ && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_char_compare_and_swap_full)
+# define AO_char_compare_and_swap_full(addr, old, new_val) \
+ (AO_nop_full(), \
+ AO_char_compare_and_swap_acquire(addr, old, new_val))
+# define AO_HAVE_char_compare_and_swap_full
+#endif
+
+#if !defined(AO_HAVE_char_compare_and_swap_release_write) \
+ && defined(AO_HAVE_char_compare_and_swap_write)
+# define AO_char_compare_and_swap_release_write(addr, old, new_val) \
+ AO_char_compare_and_swap_write(addr, old, new_val)
+# define AO_HAVE_char_compare_and_swap_release_write
+#endif
+#if !defined(AO_HAVE_char_compare_and_swap_release_write) \
+ && defined(AO_HAVE_char_compare_and_swap_release)
+# define AO_char_compare_and_swap_release_write(addr, old, new_val) \
+ AO_char_compare_and_swap_release(addr, old, new_val)
+# define AO_HAVE_char_compare_and_swap_release_write
+#endif
+#if !defined(AO_HAVE_char_compare_and_swap_acquire_read) \
+ && defined(AO_HAVE_char_compare_and_swap_read)
+# define AO_char_compare_and_swap_acquire_read(addr, old, new_val) \
+ AO_char_compare_and_swap_read(addr, old, new_val)
+# define AO_HAVE_char_compare_and_swap_acquire_read
+#endif
+#if !defined(AO_HAVE_char_compare_and_swap_acquire_read) \
+ && defined(AO_HAVE_char_compare_and_swap_acquire)
+# define AO_char_compare_and_swap_acquire_read(addr, old, new_val) \
+ AO_char_compare_and_swap_acquire(addr, old, new_val)
+# define AO_HAVE_char_compare_and_swap_acquire_read
+#endif
+
+#ifdef AO_NO_DD_ORDERING
+# if defined(AO_HAVE_char_compare_and_swap_acquire_read)
+# define AO_char_compare_and_swap_dd_acquire_read(addr, old, new_val) \
+ AO_char_compare_and_swap_acquire_read(addr, old, new_val)
+# define AO_HAVE_char_compare_and_swap_dd_acquire_read
+# endif
+#else
+# if defined(AO_HAVE_char_compare_and_swap)
+# define AO_char_compare_and_swap_dd_acquire_read(addr, old, new_val) \
+ AO_char_compare_and_swap(addr, old, new_val)
+# define AO_HAVE_char_compare_and_swap_dd_acquire_read
+# endif
+#endif /* !AO_NO_DD_ORDERING */
+
+/* char_load */
+#if defined(AO_HAVE_char_load_full) && !defined(AO_HAVE_char_load_acquire)
+# define AO_char_load_acquire(addr) AO_char_load_full(addr)
+# define AO_HAVE_char_load_acquire
+#endif
+
+#if defined(AO_HAVE_char_load_acquire) && !defined(AO_HAVE_char_load)
+# define AO_char_load(addr) AO_char_load_acquire(addr)
+# define AO_HAVE_char_load
+#endif
+
+#if defined(AO_HAVE_char_load_full) && !defined(AO_HAVE_char_load_read)
+# define AO_char_load_read(addr) AO_char_load_full(addr)
+# define AO_HAVE_char_load_read
+#endif
+
+#if !defined(AO_HAVE_char_load_acquire_read) \
+ && defined(AO_HAVE_char_load_acquire)
+# define AO_char_load_acquire_read(addr) AO_char_load_acquire(addr)
+# define AO_HAVE_char_load_acquire_read
+#endif
+
+#if defined(AO_HAVE_char_load) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_char_load_acquire)
+ AO_INLINE unsigned/**/char
+ AO_char_load_acquire(const volatile unsigned/**/char *addr)
+ {
+ unsigned/**/char result = AO_char_load(addr);
+
+ /* Acquire barrier would be useless, since the load could be delayed */
+ /* beyond it. */
+ AO_nop_full();
+ return result;
+ }
+# define AO_HAVE_char_load_acquire
+#endif
+
+#if defined(AO_HAVE_char_load) && defined(AO_HAVE_nop_read) \
+ && !defined(AO_HAVE_char_load_read)
+ AO_INLINE unsigned/**/char
+ AO_char_load_read(const volatile unsigned/**/char *addr)
+ {
+ unsigned/**/char result = AO_char_load(addr);
+
+ AO_nop_read();
+ return result;
+ }
+# define AO_HAVE_char_load_read
+#endif
+
+#if defined(AO_HAVE_char_load_acquire) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_char_load_full)
+# define AO_char_load_full(addr) (AO_nop_full(), AO_char_load_acquire(addr))
+# define AO_HAVE_char_load_full
+#endif
+
+#if defined(AO_HAVE_char_compare_and_swap_read) \
+ && !defined(AO_HAVE_char_load_read)
+# define AO_char_CAS_BASED_LOAD_READ
+ AO_INLINE unsigned/**/char
+ AO_char_load_read(const volatile unsigned/**/char *addr)
+ {
+ unsigned/**/char result;
+
+ do {
+ result = *(const unsigned/**/char *)addr;
+ } while (AO_EXPECT_FALSE(!AO_char_compare_and_swap_read(
+ (volatile unsigned/**/char *)addr,
+ result, result)));
+ return result;
+ }
+# define AO_HAVE_char_load_read
+#endif
+
+#if !defined(AO_HAVE_char_load_acquire_read) \
+ && defined(AO_HAVE_char_load_read)
+# define AO_char_load_acquire_read(addr) AO_char_load_read(addr)
+# define AO_HAVE_char_load_acquire_read
+#endif
+
+#if defined(AO_HAVE_char_load_acquire_read) && !defined(AO_HAVE_char_load) \
+ && (!defined(AO_char_CAS_BASED_LOAD_READ) \
+ || !defined(AO_HAVE_char_compare_and_swap))
+# define AO_char_load(addr) AO_char_load_acquire_read(addr)
+# define AO_HAVE_char_load
+#endif
+
+#if defined(AO_HAVE_char_compare_and_swap_full) \
+ && !defined(AO_HAVE_char_load_full)
+ AO_INLINE unsigned/**/char
+ AO_char_load_full(const volatile unsigned/**/char *addr)
+ {
+ unsigned/**/char result;
+
+ do {
+ result = *(const unsigned/**/char *)addr;
+ } while (AO_EXPECT_FALSE(!AO_char_compare_and_swap_full(
+ (volatile unsigned/**/char *)addr,
+ result, result)));
+ return result;
+ }
+# define AO_HAVE_char_load_full
+#endif
+
+#if defined(AO_HAVE_char_compare_and_swap_acquire) \
+ && !defined(AO_HAVE_char_load_acquire)
+ AO_INLINE unsigned/**/char
+ AO_char_load_acquire(const volatile unsigned/**/char *addr)
+ {
+ unsigned/**/char result;
+
+ do {
+ result = *(const unsigned/**/char *)addr;
+ } while (AO_EXPECT_FALSE(!AO_char_compare_and_swap_acquire(
+ (volatile unsigned/**/char *)addr,
+ result, result)));
+ return result;
+ }
+# define AO_HAVE_char_load_acquire
+#endif
+
+#if defined(AO_HAVE_char_compare_and_swap) && !defined(AO_HAVE_char_load)
+ AO_INLINE unsigned/**/char
+ AO_char_load(const volatile unsigned/**/char *addr)
+ {
+ unsigned/**/char result;
+
+ do {
+ result = *(const unsigned/**/char *)addr;
+ } while (AO_EXPECT_FALSE(!AO_char_compare_and_swap(
+ (volatile unsigned/**/char *)addr,
+ result, result)));
+ return result;
+ }
+# define AO_HAVE_char_load
+#endif
+
+#ifdef AO_NO_DD_ORDERING
+# if defined(AO_HAVE_char_load_acquire_read)
+# define AO_char_load_dd_acquire_read(addr) \
+ AO_char_load_acquire_read(addr)
+# define AO_HAVE_char_load_dd_acquire_read
+# endif
+#else
+# if defined(AO_HAVE_char_load)
+# define AO_char_load_dd_acquire_read(addr) AO_char_load(addr)
+# define AO_HAVE_char_load_dd_acquire_read
+# endif
+#endif /* !AO_NO_DD_ORDERING */
+
+/* char_store */
+#if defined(AO_HAVE_char_store_full) && !defined(AO_HAVE_char_store_release)
+# define AO_char_store_release(addr, val) AO_char_store_full(addr, val)
+# define AO_HAVE_char_store_release
+#endif
+
+#if defined(AO_HAVE_char_store_release) && !defined(AO_HAVE_char_store)
+# define AO_char_store(addr, val) AO_char_store_release(addr, val)
+# define AO_HAVE_char_store
+#endif
+
+#if defined(AO_HAVE_char_store_full) && !defined(AO_HAVE_char_store_write)
+# define AO_char_store_write(addr, val) AO_char_store_full(addr, val)
+# define AO_HAVE_char_store_write
+#endif
+
+#if defined(AO_HAVE_char_store_release) \
+ && !defined(AO_HAVE_char_store_release_write)
+# define AO_char_store_release_write(addr, val) \
+ AO_char_store_release(addr, val)
+# define AO_HAVE_char_store_release_write
+#endif
+
+#if defined(AO_HAVE_char_store_write) && !defined(AO_HAVE_char_store)
+# define AO_char_store(addr, val) AO_char_store_write(addr, val)
+# define AO_HAVE_char_store
+#endif
+
+#if defined(AO_HAVE_char_store) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_char_store_release)
+# define AO_char_store_release(addr, val) \
+ (AO_nop_full(), AO_char_store(addr, val))
+# define AO_HAVE_char_store_release
+#endif
+
+#if defined(AO_HAVE_char_store) && defined(AO_HAVE_nop_write) \
+ && !defined(AO_HAVE_char_store_write)
+# define AO_char_store_write(addr, val) \
+ (AO_nop_write(), AO_char_store(addr, val))
+# define AO_HAVE_char_store_write
+#endif
+
+#if defined(AO_HAVE_char_compare_and_swap_write) \
+ && !defined(AO_HAVE_char_store_write)
+ AO_INLINE void
+ AO_char_store_write(volatile unsigned/**/char *addr, unsigned/**/char new_val)
+ {
+ unsigned/**/char old_val;
+
+ do {
+ old_val = *(unsigned/**/char *)addr;
+ } while (AO_EXPECT_FALSE(!AO_char_compare_and_swap_write(addr, old_val,
+ new_val)));
+ }
+# define AO_HAVE_char_store_write
+#endif
+
+#if defined(AO_HAVE_char_store_write) \
+ && !defined(AO_HAVE_char_store_release_write)
+# define AO_char_store_release_write(addr, val) \
+ AO_char_store_write(addr, val)
+# define AO_HAVE_char_store_release_write
+#endif
+
+#if defined(AO_HAVE_char_store_release) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_char_store_full)
+# define AO_char_store_full(addr, val) \
+ (AO_char_store_release(addr, val), \
+ AO_nop_full())
+# define AO_HAVE_char_store_full
+#endif
+
+#if defined(AO_HAVE_char_compare_and_swap) && !defined(AO_HAVE_char_store)
+ AO_INLINE void
+ AO_char_store(volatile unsigned/**/char *addr, unsigned/**/char new_val)
+ {
+ unsigned/**/char old_val;
+
+ do {
+ old_val = *(unsigned/**/char *)addr;
+ } while (AO_EXPECT_FALSE(!AO_char_compare_and_swap(addr,
+ old_val, new_val)));
+ }
+# define AO_HAVE_char_store
+#endif
+
+#if defined(AO_HAVE_char_compare_and_swap_release) \
+ && !defined(AO_HAVE_char_store_release)
+ AO_INLINE void
+ AO_char_store_release(volatile unsigned/**/char *addr, unsigned/**/char new_val)
+ {
+ unsigned/**/char old_val;
+
+ do {
+ old_val = *(unsigned/**/char *)addr;
+ } while (AO_EXPECT_FALSE(!AO_char_compare_and_swap_release(addr, old_val,
+ new_val)));
+ }
+# define AO_HAVE_char_store_release
+#endif
+
+#if defined(AO_HAVE_char_compare_and_swap_full) \
+ && !defined(AO_HAVE_char_store_full)
+ AO_INLINE void
+ AO_char_store_full(volatile unsigned/**/char *addr, unsigned/**/char new_val)
+ {
+ unsigned/**/char old_val;
+
+ do {
+ old_val = *(unsigned/**/char *)addr;
+ } while (AO_EXPECT_FALSE(!AO_char_compare_and_swap_full(addr, old_val,
+ new_val)));
+ }
+# define AO_HAVE_char_store_full
+#endif
+/*
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* short_fetch_compare_and_swap */
+#if defined(AO_HAVE_short_fetch_compare_and_swap) \
+ && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_short_fetch_compare_and_swap_acquire)
+ AO_INLINE unsigned/**/short
+ AO_short_fetch_compare_and_swap_acquire(volatile unsigned/**/short *addr,
+ unsigned/**/short old_val, unsigned/**/short new_val)
+ {
+ unsigned/**/short result = AO_short_fetch_compare_and_swap(addr, old_val, new_val);
+ AO_nop_full();
+ return result;
+ }
+# define AO_HAVE_short_fetch_compare_and_swap_acquire
+#endif
+#if defined(AO_HAVE_short_fetch_compare_and_swap) \
+ && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_short_fetch_compare_and_swap_release)
+# define AO_short_fetch_compare_and_swap_release(addr, old_val, new_val) \
+ (AO_nop_full(), \
+ AO_short_fetch_compare_and_swap(addr, old_val, new_val))
+# define AO_HAVE_short_fetch_compare_and_swap_release
+#endif
+#if defined(AO_HAVE_short_fetch_compare_and_swap_full)
+# if !defined(AO_HAVE_short_fetch_compare_and_swap_release)
+# define AO_short_fetch_compare_and_swap_release(addr, old_val, new_val) \
+ AO_short_fetch_compare_and_swap_full(addr, old_val, new_val)
+# define AO_HAVE_short_fetch_compare_and_swap_release
+# endif
+# if !defined(AO_HAVE_short_fetch_compare_and_swap_acquire)
+# define AO_short_fetch_compare_and_swap_acquire(addr, old_val, new_val) \
+ AO_short_fetch_compare_and_swap_full(addr, old_val, new_val)
+# define AO_HAVE_short_fetch_compare_and_swap_acquire
+# endif
+# if !defined(AO_HAVE_short_fetch_compare_and_swap_write)
+# define AO_short_fetch_compare_and_swap_write(addr, old_val, new_val) \
+ AO_short_fetch_compare_and_swap_full(addr, old_val, new_val)
+# define AO_HAVE_short_fetch_compare_and_swap_write
+# endif
+# if !defined(AO_HAVE_short_fetch_compare_and_swap_read)
+# define AO_short_fetch_compare_and_swap_read(addr, old_val, new_val) \
+ AO_short_fetch_compare_and_swap_full(addr, old_val, new_val)
+# define AO_HAVE_short_fetch_compare_and_swap_read
+# endif
+#endif /* AO_HAVE_short_fetch_compare_and_swap_full */
+
+#if !defined(AO_HAVE_short_fetch_compare_and_swap) \
+ && defined(AO_HAVE_short_fetch_compare_and_swap_release)
+# define AO_short_fetch_compare_and_swap(addr, old_val, new_val) \
+ AO_short_fetch_compare_and_swap_release(addr, old_val, new_val)
+# define AO_HAVE_short_fetch_compare_and_swap
+#endif
+#if !defined(AO_HAVE_short_fetch_compare_and_swap) \
+ && defined(AO_HAVE_short_fetch_compare_and_swap_acquire)
+# define AO_short_fetch_compare_and_swap(addr, old_val, new_val) \
+ AO_short_fetch_compare_and_swap_acquire(addr, old_val, new_val)
+# define AO_HAVE_short_fetch_compare_and_swap
+#endif
+#if !defined(AO_HAVE_short_fetch_compare_and_swap) \
+ && defined(AO_HAVE_short_fetch_compare_and_swap_write)
+# define AO_short_fetch_compare_and_swap(addr, old_val, new_val) \
+ AO_short_fetch_compare_and_swap_write(addr, old_val, new_val)
+# define AO_HAVE_short_fetch_compare_and_swap
+#endif
+#if !defined(AO_HAVE_short_fetch_compare_and_swap) \
+ && defined(AO_HAVE_short_fetch_compare_and_swap_read)
+# define AO_short_fetch_compare_and_swap(addr, old_val, new_val) \
+ AO_short_fetch_compare_and_swap_read(addr, old_val, new_val)
+# define AO_HAVE_short_fetch_compare_and_swap
+#endif
+
+#if defined(AO_HAVE_short_fetch_compare_and_swap_acquire) \
+ && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_short_fetch_compare_and_swap_full)
+# define AO_short_fetch_compare_and_swap_full(addr, old_val, new_val) \
+ (AO_nop_full(), \
+ AO_short_fetch_compare_and_swap_acquire(addr, old_val, new_val))
+# define AO_HAVE_short_fetch_compare_and_swap_full
+#endif
+
+#if !defined(AO_HAVE_short_fetch_compare_and_swap_release_write) \
+ && defined(AO_HAVE_short_fetch_compare_and_swap_write)
+# define AO_short_fetch_compare_and_swap_release_write(addr,old_val,new_val) \
+ AO_short_fetch_compare_and_swap_write(addr, old_val, new_val)
+# define AO_HAVE_short_fetch_compare_and_swap_release_write
+#endif
+#if !defined(AO_HAVE_short_fetch_compare_and_swap_release_write) \
+ && defined(AO_HAVE_short_fetch_compare_and_swap_release)
+# define AO_short_fetch_compare_and_swap_release_write(addr,old_val,new_val) \
+ AO_short_fetch_compare_and_swap_release(addr, old_val, new_val)
+# define AO_HAVE_short_fetch_compare_and_swap_release_write
+#endif
+#if !defined(AO_HAVE_short_fetch_compare_and_swap_acquire_read) \
+ && defined(AO_HAVE_short_fetch_compare_and_swap_read)
+# define AO_short_fetch_compare_and_swap_acquire_read(addr,old_val,new_val) \
+ AO_short_fetch_compare_and_swap_read(addr, old_val, new_val)
+# define AO_HAVE_short_fetch_compare_and_swap_acquire_read
+#endif
+#if !defined(AO_HAVE_short_fetch_compare_and_swap_acquire_read) \
+ && defined(AO_HAVE_short_fetch_compare_and_swap_acquire)
+# define AO_short_fetch_compare_and_swap_acquire_read(addr,old_val,new_val) \
+ AO_short_fetch_compare_and_swap_acquire(addr, old_val, new_val)
+# define AO_HAVE_short_fetch_compare_and_swap_acquire_read
+#endif
+
+#ifdef AO_NO_DD_ORDERING
+# if defined(AO_HAVE_short_fetch_compare_and_swap_acquire_read)
+# define AO_short_fetch_compare_and_swap_dd_acquire_read(addr,old_val,new_val) \
+ AO_short_fetch_compare_and_swap_acquire_read(addr, old_val, new_val)
+# define AO_HAVE_short_fetch_compare_and_swap_dd_acquire_read
+# endif
+#else
+# if defined(AO_HAVE_short_fetch_compare_and_swap)
+# define AO_short_fetch_compare_and_swap_dd_acquire_read(addr,old_val,new_val) \
+ AO_short_fetch_compare_and_swap(addr, old_val, new_val)
+# define AO_HAVE_short_fetch_compare_and_swap_dd_acquire_read
+# endif
+#endif /* !AO_NO_DD_ORDERING */
+
+/* short_compare_and_swap */
+#if defined(AO_HAVE_short_compare_and_swap) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_short_compare_and_swap_acquire)
+ AO_INLINE int
+ AO_short_compare_and_swap_acquire(volatile unsigned/**/short *addr, unsigned/**/short old,
+ unsigned/**/short new_val)
+ {
+ int result = AO_short_compare_and_swap(addr, old, new_val);
+ AO_nop_full();
+ return result;
+ }
+# define AO_HAVE_short_compare_and_swap_acquire
+#endif
+#if defined(AO_HAVE_short_compare_and_swap) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_short_compare_and_swap_release)
+# define AO_short_compare_and_swap_release(addr, old, new_val) \
+ (AO_nop_full(), AO_short_compare_and_swap(addr, old, new_val))
+# define AO_HAVE_short_compare_and_swap_release
+#endif
+#if defined(AO_HAVE_short_compare_and_swap_full)
+# if !defined(AO_HAVE_short_compare_and_swap_release)
+# define AO_short_compare_and_swap_release(addr, old, new_val) \
+ AO_short_compare_and_swap_full(addr, old, new_val)
+# define AO_HAVE_short_compare_and_swap_release
+# endif
+# if !defined(AO_HAVE_short_compare_and_swap_acquire)
+# define AO_short_compare_and_swap_acquire(addr, old, new_val) \
+ AO_short_compare_and_swap_full(addr, old, new_val)
+# define AO_HAVE_short_compare_and_swap_acquire
+# endif
+# if !defined(AO_HAVE_short_compare_and_swap_write)
+# define AO_short_compare_and_swap_write(addr, old, new_val) \
+ AO_short_compare_and_swap_full(addr, old, new_val)
+# define AO_HAVE_short_compare_and_swap_write
+# endif
+# if !defined(AO_HAVE_short_compare_and_swap_read)
+# define AO_short_compare_and_swap_read(addr, old, new_val) \
+ AO_short_compare_and_swap_full(addr, old, new_val)
+# define AO_HAVE_short_compare_and_swap_read
+# endif
+#endif /* AO_HAVE_short_compare_and_swap_full */
+
+#if !defined(AO_HAVE_short_compare_and_swap) \
+ && defined(AO_HAVE_short_compare_and_swap_release)
+# define AO_short_compare_and_swap(addr, old, new_val) \
+ AO_short_compare_and_swap_release(addr, old, new_val)
+# define AO_HAVE_short_compare_and_swap
+#endif
+#if !defined(AO_HAVE_short_compare_and_swap) \
+ && defined(AO_HAVE_short_compare_and_swap_acquire)
+# define AO_short_compare_and_swap(addr, old, new_val) \
+ AO_short_compare_and_swap_acquire(addr, old, new_val)
+# define AO_HAVE_short_compare_and_swap
+#endif
+#if !defined(AO_HAVE_short_compare_and_swap) \
+ && defined(AO_HAVE_short_compare_and_swap_write)
+# define AO_short_compare_and_swap(addr, old, new_val) \
+ AO_short_compare_and_swap_write(addr, old, new_val)
+# define AO_HAVE_short_compare_and_swap
+#endif
+#if !defined(AO_HAVE_short_compare_and_swap) \
+ && defined(AO_HAVE_short_compare_and_swap_read)
+# define AO_short_compare_and_swap(addr, old, new_val) \
+ AO_short_compare_and_swap_read(addr, old, new_val)
+# define AO_HAVE_short_compare_and_swap
+#endif
+
+#if defined(AO_HAVE_short_compare_and_swap_acquire) \
+ && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_short_compare_and_swap_full)
+# define AO_short_compare_and_swap_full(addr, old, new_val) \
+ (AO_nop_full(), \
+ AO_short_compare_and_swap_acquire(addr, old, new_val))
+# define AO_HAVE_short_compare_and_swap_full
+#endif
+
+#if !defined(AO_HAVE_short_compare_and_swap_release_write) \
+ && defined(AO_HAVE_short_compare_and_swap_write)
+# define AO_short_compare_and_swap_release_write(addr, old, new_val) \
+ AO_short_compare_and_swap_write(addr, old, new_val)
+# define AO_HAVE_short_compare_and_swap_release_write
+#endif
+#if !defined(AO_HAVE_short_compare_and_swap_release_write) \
+ && defined(AO_HAVE_short_compare_and_swap_release)
+# define AO_short_compare_and_swap_release_write(addr, old, new_val) \
+ AO_short_compare_and_swap_release(addr, old, new_val)
+# define AO_HAVE_short_compare_and_swap_release_write
+#endif
+#if !defined(AO_HAVE_short_compare_and_swap_acquire_read) \
+ && defined(AO_HAVE_short_compare_and_swap_read)
+# define AO_short_compare_and_swap_acquire_read(addr, old, new_val) \
+ AO_short_compare_and_swap_read(addr, old, new_val)
+# define AO_HAVE_short_compare_and_swap_acquire_read
+#endif
+#if !defined(AO_HAVE_short_compare_and_swap_acquire_read) \
+ && defined(AO_HAVE_short_compare_and_swap_acquire)
+# define AO_short_compare_and_swap_acquire_read(addr, old, new_val) \
+ AO_short_compare_and_swap_acquire(addr, old, new_val)
+# define AO_HAVE_short_compare_and_swap_acquire_read
+#endif
+
+#ifdef AO_NO_DD_ORDERING
+# if defined(AO_HAVE_short_compare_and_swap_acquire_read)
+# define AO_short_compare_and_swap_dd_acquire_read(addr, old, new_val) \
+ AO_short_compare_and_swap_acquire_read(addr, old, new_val)
+# define AO_HAVE_short_compare_and_swap_dd_acquire_read
+# endif
+#else
+# if defined(AO_HAVE_short_compare_and_swap)
+# define AO_short_compare_and_swap_dd_acquire_read(addr, old, new_val) \
+ AO_short_compare_and_swap(addr, old, new_val)
+# define AO_HAVE_short_compare_and_swap_dd_acquire_read
+# endif
+#endif /* !AO_NO_DD_ORDERING */
+
+/* short_load */
+#if defined(AO_HAVE_short_load_full) && !defined(AO_HAVE_short_load_acquire)
+# define AO_short_load_acquire(addr) AO_short_load_full(addr)
+# define AO_HAVE_short_load_acquire
+#endif
+
+#if defined(AO_HAVE_short_load_acquire) && !defined(AO_HAVE_short_load)
+# define AO_short_load(addr) AO_short_load_acquire(addr)
+# define AO_HAVE_short_load
+#endif
+
+#if defined(AO_HAVE_short_load_full) && !defined(AO_HAVE_short_load_read)
+# define AO_short_load_read(addr) AO_short_load_full(addr)
+# define AO_HAVE_short_load_read
+#endif
+
+#if !defined(AO_HAVE_short_load_acquire_read) \
+ && defined(AO_HAVE_short_load_acquire)
+# define AO_short_load_acquire_read(addr) AO_short_load_acquire(addr)
+# define AO_HAVE_short_load_acquire_read
+#endif
+
+#if defined(AO_HAVE_short_load) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_short_load_acquire)
+ AO_INLINE unsigned/**/short
+ AO_short_load_acquire(const volatile unsigned/**/short *addr)
+ {
+ unsigned/**/short result = AO_short_load(addr);
+
+ /* Acquire barrier would be useless, since the load could be delayed */
+ /* beyond it. */
+ AO_nop_full();
+ return result;
+ }
+# define AO_HAVE_short_load_acquire
+#endif
+
+#if defined(AO_HAVE_short_load) && defined(AO_HAVE_nop_read) \
+ && !defined(AO_HAVE_short_load_read)
+ AO_INLINE unsigned/**/short
+ AO_short_load_read(const volatile unsigned/**/short *addr)
+ {
+ unsigned/**/short result = AO_short_load(addr);
+
+ AO_nop_read();
+ return result;
+ }
+# define AO_HAVE_short_load_read
+#endif
+
+#if defined(AO_HAVE_short_load_acquire) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_short_load_full)
+# define AO_short_load_full(addr) (AO_nop_full(), AO_short_load_acquire(addr))
+# define AO_HAVE_short_load_full
+#endif
+
+#if defined(AO_HAVE_short_compare_and_swap_read) \
+ && !defined(AO_HAVE_short_load_read)
+# define AO_short_CAS_BASED_LOAD_READ
+ AO_INLINE unsigned/**/short
+ AO_short_load_read(const volatile unsigned/**/short *addr)
+ {
+ unsigned/**/short result;
+
+ do {
+ result = *(const unsigned/**/short *)addr;
+ } while (AO_EXPECT_FALSE(!AO_short_compare_and_swap_read(
+ (volatile unsigned/**/short *)addr,
+ result, result)));
+ return result;
+ }
+# define AO_HAVE_short_load_read
+#endif
+
+#if !defined(AO_HAVE_short_load_acquire_read) \
+ && defined(AO_HAVE_short_load_read)
+# define AO_short_load_acquire_read(addr) AO_short_load_read(addr)
+# define AO_HAVE_short_load_acquire_read
+#endif
+
+#if defined(AO_HAVE_short_load_acquire_read) && !defined(AO_HAVE_short_load) \
+ && (!defined(AO_short_CAS_BASED_LOAD_READ) \
+ || !defined(AO_HAVE_short_compare_and_swap))
+# define AO_short_load(addr) AO_short_load_acquire_read(addr)
+# define AO_HAVE_short_load
+#endif
+
+#if defined(AO_HAVE_short_compare_and_swap_full) \
+ && !defined(AO_HAVE_short_load_full)
+ AO_INLINE unsigned/**/short
+ AO_short_load_full(const volatile unsigned/**/short *addr)
+ {
+ unsigned/**/short result;
+
+ do {
+ result = *(const unsigned/**/short *)addr;
+ } while (AO_EXPECT_FALSE(!AO_short_compare_and_swap_full(
+ (volatile unsigned/**/short *)addr,
+ result, result)));
+ return result;
+ }
+# define AO_HAVE_short_load_full
+#endif
+
+#if defined(AO_HAVE_short_compare_and_swap_acquire) \
+ && !defined(AO_HAVE_short_load_acquire)
+ AO_INLINE unsigned/**/short
+ AO_short_load_acquire(const volatile unsigned/**/short *addr)
+ {
+ unsigned/**/short result;
+
+ do {
+ result = *(const unsigned/**/short *)addr;
+ } while (AO_EXPECT_FALSE(!AO_short_compare_and_swap_acquire(
+ (volatile unsigned/**/short *)addr,
+ result, result)));
+ return result;
+ }
+# define AO_HAVE_short_load_acquire
+#endif
+
+#if defined(AO_HAVE_short_compare_and_swap) && !defined(AO_HAVE_short_load)
+ AO_INLINE unsigned/**/short
+ AO_short_load(const volatile unsigned/**/short *addr)
+ {
+ unsigned/**/short result;
+
+ do {
+ result = *(const unsigned/**/short *)addr;
+ } while (AO_EXPECT_FALSE(!AO_short_compare_and_swap(
+ (volatile unsigned/**/short *)addr,
+ result, result)));
+ return result;
+ }
+# define AO_HAVE_short_load
+#endif
+
+#ifdef AO_NO_DD_ORDERING
+# if defined(AO_HAVE_short_load_acquire_read)
+# define AO_short_load_dd_acquire_read(addr) \
+ AO_short_load_acquire_read(addr)
+# define AO_HAVE_short_load_dd_acquire_read
+# endif
+#else
+# if defined(AO_HAVE_short_load)
+# define AO_short_load_dd_acquire_read(addr) AO_short_load(addr)
+# define AO_HAVE_short_load_dd_acquire_read
+# endif
+#endif /* !AO_NO_DD_ORDERING */
+
+/* short_store */
+#if defined(AO_HAVE_short_store_full) && !defined(AO_HAVE_short_store_release)
+# define AO_short_store_release(addr, val) AO_short_store_full(addr, val)
+# define AO_HAVE_short_store_release
+#endif
+
+#if defined(AO_HAVE_short_store_release) && !defined(AO_HAVE_short_store)
+# define AO_short_store(addr, val) AO_short_store_release(addr, val)
+# define AO_HAVE_short_store
+#endif
+
+#if defined(AO_HAVE_short_store_full) && !defined(AO_HAVE_short_store_write)
+# define AO_short_store_write(addr, val) AO_short_store_full(addr, val)
+# define AO_HAVE_short_store_write
+#endif
+
+#if defined(AO_HAVE_short_store_release) \
+ && !defined(AO_HAVE_short_store_release_write)
+# define AO_short_store_release_write(addr, val) \
+ AO_short_store_release(addr, val)
+# define AO_HAVE_short_store_release_write
+#endif
+
+#if defined(AO_HAVE_short_store_write) && !defined(AO_HAVE_short_store)
+# define AO_short_store(addr, val) AO_short_store_write(addr, val)
+# define AO_HAVE_short_store
+#endif
+
+#if defined(AO_HAVE_short_store) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_short_store_release)
+# define AO_short_store_release(addr, val) \
+ (AO_nop_full(), AO_short_store(addr, val))
+# define AO_HAVE_short_store_release
+#endif
+
+#if defined(AO_HAVE_short_store) && defined(AO_HAVE_nop_write) \
+ && !defined(AO_HAVE_short_store_write)
+# define AO_short_store_write(addr, val) \
+ (AO_nop_write(), AO_short_store(addr, val))
+# define AO_HAVE_short_store_write
+#endif
+
+#if defined(AO_HAVE_short_compare_and_swap_write) \
+ && !defined(AO_HAVE_short_store_write)
+ AO_INLINE void
+ AO_short_store_write(volatile unsigned/**/short *addr, unsigned/**/short new_val)
+ {
+ unsigned/**/short old_val;
+
+ do {
+ old_val = *(unsigned/**/short *)addr;
+ } while (AO_EXPECT_FALSE(!AO_short_compare_and_swap_write(addr, old_val,
+ new_val)));
+ }
+# define AO_HAVE_short_store_write
+#endif
+
+#if defined(AO_HAVE_short_store_write) \
+ && !defined(AO_HAVE_short_store_release_write)
+# define AO_short_store_release_write(addr, val) \
+ AO_short_store_write(addr, val)
+# define AO_HAVE_short_store_release_write
+#endif
+
+#if defined(AO_HAVE_short_store_release) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_short_store_full)
+# define AO_short_store_full(addr, val) \
+ (AO_short_store_release(addr, val), \
+ AO_nop_full())
+# define AO_HAVE_short_store_full
+#endif
+
+#if defined(AO_HAVE_short_compare_and_swap) && !defined(AO_HAVE_short_store)
+ AO_INLINE void
+ AO_short_store(volatile unsigned/**/short *addr, unsigned/**/short new_val)
+ {
+ unsigned/**/short old_val;
+
+ do {
+ old_val = *(unsigned/**/short *)addr;
+ } while (AO_EXPECT_FALSE(!AO_short_compare_and_swap(addr,
+ old_val, new_val)));
+ }
+# define AO_HAVE_short_store
+#endif
+
+#if defined(AO_HAVE_short_compare_and_swap_release) \
+ && !defined(AO_HAVE_short_store_release)
+ AO_INLINE void
+ AO_short_store_release(volatile unsigned/**/short *addr, unsigned/**/short new_val)
+ {
+ unsigned/**/short old_val;
+
+ do {
+ old_val = *(unsigned/**/short *)addr;
+ } while (AO_EXPECT_FALSE(!AO_short_compare_and_swap_release(addr, old_val,
+ new_val)));
+ }
+# define AO_HAVE_short_store_release
+#endif
+
+#if defined(AO_HAVE_short_compare_and_swap_full) \
+ && !defined(AO_HAVE_short_store_full)
+ AO_INLINE void
+ AO_short_store_full(volatile unsigned/**/short *addr, unsigned/**/short new_val)
+ {
+ unsigned/**/short old_val;
+
+ do {
+ old_val = *(unsigned/**/short *)addr;
+ } while (AO_EXPECT_FALSE(!AO_short_compare_and_swap_full(addr, old_val,
+ new_val)));
+ }
+# define AO_HAVE_short_store_full
+#endif
+/*
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* int_fetch_compare_and_swap */
+#if defined(AO_HAVE_int_fetch_compare_and_swap) \
+ && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_int_fetch_compare_and_swap_acquire)
+ AO_INLINE unsigned
+ AO_int_fetch_compare_and_swap_acquire(volatile unsigned *addr,
+ unsigned old_val, unsigned new_val)
+ {
+ unsigned result = AO_int_fetch_compare_and_swap(addr, old_val, new_val);
+ AO_nop_full();
+ return result;
+ }
+# define AO_HAVE_int_fetch_compare_and_swap_acquire
+#endif
+#if defined(AO_HAVE_int_fetch_compare_and_swap) \
+ && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_int_fetch_compare_and_swap_release)
+# define AO_int_fetch_compare_and_swap_release(addr, old_val, new_val) \
+ (AO_nop_full(), \
+ AO_int_fetch_compare_and_swap(addr, old_val, new_val))
+# define AO_HAVE_int_fetch_compare_and_swap_release
+#endif
+#if defined(AO_HAVE_int_fetch_compare_and_swap_full)
+# if !defined(AO_HAVE_int_fetch_compare_and_swap_release)
+# define AO_int_fetch_compare_and_swap_release(addr, old_val, new_val) \
+ AO_int_fetch_compare_and_swap_full(addr, old_val, new_val)
+# define AO_HAVE_int_fetch_compare_and_swap_release
+# endif
+# if !defined(AO_HAVE_int_fetch_compare_and_swap_acquire)
+# define AO_int_fetch_compare_and_swap_acquire(addr, old_val, new_val) \
+ AO_int_fetch_compare_and_swap_full(addr, old_val, new_val)
+# define AO_HAVE_int_fetch_compare_and_swap_acquire
+# endif
+# if !defined(AO_HAVE_int_fetch_compare_and_swap_write)
+# define AO_int_fetch_compare_and_swap_write(addr, old_val, new_val) \
+ AO_int_fetch_compare_and_swap_full(addr, old_val, new_val)
+# define AO_HAVE_int_fetch_compare_and_swap_write
+# endif
+# if !defined(AO_HAVE_int_fetch_compare_and_swap_read)
+# define AO_int_fetch_compare_and_swap_read(addr, old_val, new_val) \
+ AO_int_fetch_compare_and_swap_full(addr, old_val, new_val)
+# define AO_HAVE_int_fetch_compare_and_swap_read
+# endif
+#endif /* AO_HAVE_int_fetch_compare_and_swap_full */
+
+#if !defined(AO_HAVE_int_fetch_compare_and_swap) \
+ && defined(AO_HAVE_int_fetch_compare_and_swap_release)
+# define AO_int_fetch_compare_and_swap(addr, old_val, new_val) \
+ AO_int_fetch_compare_and_swap_release(addr, old_val, new_val)
+# define AO_HAVE_int_fetch_compare_and_swap
+#endif
+#if !defined(AO_HAVE_int_fetch_compare_and_swap) \
+ && defined(AO_HAVE_int_fetch_compare_and_swap_acquire)
+# define AO_int_fetch_compare_and_swap(addr, old_val, new_val) \
+ AO_int_fetch_compare_and_swap_acquire(addr, old_val, new_val)
+# define AO_HAVE_int_fetch_compare_and_swap
+#endif
+#if !defined(AO_HAVE_int_fetch_compare_and_swap) \
+ && defined(AO_HAVE_int_fetch_compare_and_swap_write)
+# define AO_int_fetch_compare_and_swap(addr, old_val, new_val) \
+ AO_int_fetch_compare_and_swap_write(addr, old_val, new_val)
+# define AO_HAVE_int_fetch_compare_and_swap
+#endif
+#if !defined(AO_HAVE_int_fetch_compare_and_swap) \
+ && defined(AO_HAVE_int_fetch_compare_and_swap_read)
+# define AO_int_fetch_compare_and_swap(addr, old_val, new_val) \
+ AO_int_fetch_compare_and_swap_read(addr, old_val, new_val)
+# define AO_HAVE_int_fetch_compare_and_swap
+#endif
+
+#if defined(AO_HAVE_int_fetch_compare_and_swap_acquire) \
+ && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_int_fetch_compare_and_swap_full)
+# define AO_int_fetch_compare_and_swap_full(addr, old_val, new_val) \
+ (AO_nop_full(), \
+ AO_int_fetch_compare_and_swap_acquire(addr, old_val, new_val))
+# define AO_HAVE_int_fetch_compare_and_swap_full
+#endif
+
+#if !defined(AO_HAVE_int_fetch_compare_and_swap_release_write) \
+ && defined(AO_HAVE_int_fetch_compare_and_swap_write)
+# define AO_int_fetch_compare_and_swap_release_write(addr,old_val,new_val) \
+ AO_int_fetch_compare_and_swap_write(addr, old_val, new_val)
+# define AO_HAVE_int_fetch_compare_and_swap_release_write
+#endif
+#if !defined(AO_HAVE_int_fetch_compare_and_swap_release_write) \
+ && defined(AO_HAVE_int_fetch_compare_and_swap_release)
+# define AO_int_fetch_compare_and_swap_release_write(addr,old_val,new_val) \
+ AO_int_fetch_compare_and_swap_release(addr, old_val, new_val)
+# define AO_HAVE_int_fetch_compare_and_swap_release_write
+#endif
+#if !defined(AO_HAVE_int_fetch_compare_and_swap_acquire_read) \
+ && defined(AO_HAVE_int_fetch_compare_and_swap_read)
+# define AO_int_fetch_compare_and_swap_acquire_read(addr,old_val,new_val) \
+ AO_int_fetch_compare_and_swap_read(addr, old_val, new_val)
+# define AO_HAVE_int_fetch_compare_and_swap_acquire_read
+#endif
+#if !defined(AO_HAVE_int_fetch_compare_and_swap_acquire_read) \
+ && defined(AO_HAVE_int_fetch_compare_and_swap_acquire)
+# define AO_int_fetch_compare_and_swap_acquire_read(addr,old_val,new_val) \
+ AO_int_fetch_compare_and_swap_acquire(addr, old_val, new_val)
+# define AO_HAVE_int_fetch_compare_and_swap_acquire_read
+#endif
+
+#ifdef AO_NO_DD_ORDERING
+# if defined(AO_HAVE_int_fetch_compare_and_swap_acquire_read)
+# define AO_int_fetch_compare_and_swap_dd_acquire_read(addr,old_val,new_val) \
+ AO_int_fetch_compare_and_swap_acquire_read(addr, old_val, new_val)
+# define AO_HAVE_int_fetch_compare_and_swap_dd_acquire_read
+# endif
+#else
+# if defined(AO_HAVE_int_fetch_compare_and_swap)
+# define AO_int_fetch_compare_and_swap_dd_acquire_read(addr,old_val,new_val) \
+ AO_int_fetch_compare_and_swap(addr, old_val, new_val)
+# define AO_HAVE_int_fetch_compare_and_swap_dd_acquire_read
+# endif
+#endif /* !AO_NO_DD_ORDERING */
+
+/* int_compare_and_swap */
+#if defined(AO_HAVE_int_compare_and_swap) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_int_compare_and_swap_acquire)
+ AO_INLINE int
+ AO_int_compare_and_swap_acquire(volatile unsigned *addr, unsigned old,
+ unsigned new_val)
+ {
+ int result = AO_int_compare_and_swap(addr, old, new_val);
+ AO_nop_full();
+ return result;
+ }
+# define AO_HAVE_int_compare_and_swap_acquire
+#endif
+#if defined(AO_HAVE_int_compare_and_swap) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_int_compare_and_swap_release)
+# define AO_int_compare_and_swap_release(addr, old, new_val) \
+ (AO_nop_full(), AO_int_compare_and_swap(addr, old, new_val))
+# define AO_HAVE_int_compare_and_swap_release
+#endif
+#if defined(AO_HAVE_int_compare_and_swap_full)
+# if !defined(AO_HAVE_int_compare_and_swap_release)
+# define AO_int_compare_and_swap_release(addr, old, new_val) \
+ AO_int_compare_and_swap_full(addr, old, new_val)
+# define AO_HAVE_int_compare_and_swap_release
+# endif
+# if !defined(AO_HAVE_int_compare_and_swap_acquire)
+# define AO_int_compare_and_swap_acquire(addr, old, new_val) \
+ AO_int_compare_and_swap_full(addr, old, new_val)
+# define AO_HAVE_int_compare_and_swap_acquire
+# endif
+# if !defined(AO_HAVE_int_compare_and_swap_write)
+# define AO_int_compare_and_swap_write(addr, old, new_val) \
+ AO_int_compare_and_swap_full(addr, old, new_val)
+# define AO_HAVE_int_compare_and_swap_write
+# endif
+# if !defined(AO_HAVE_int_compare_and_swap_read)
+# define AO_int_compare_and_swap_read(addr, old, new_val) \
+ AO_int_compare_and_swap_full(addr, old, new_val)
+# define AO_HAVE_int_compare_and_swap_read
+# endif
+#endif /* AO_HAVE_int_compare_and_swap_full */
+
+#if !defined(AO_HAVE_int_compare_and_swap) \
+ && defined(AO_HAVE_int_compare_and_swap_release)
+# define AO_int_compare_and_swap(addr, old, new_val) \
+ AO_int_compare_and_swap_release(addr, old, new_val)
+# define AO_HAVE_int_compare_and_swap
+#endif
+#if !defined(AO_HAVE_int_compare_and_swap) \
+ && defined(AO_HAVE_int_compare_and_swap_acquire)
+# define AO_int_compare_and_swap(addr, old, new_val) \
+ AO_int_compare_and_swap_acquire(addr, old, new_val)
+# define AO_HAVE_int_compare_and_swap
+#endif
+#if !defined(AO_HAVE_int_compare_and_swap) \
+ && defined(AO_HAVE_int_compare_and_swap_write)
+# define AO_int_compare_and_swap(addr, old, new_val) \
+ AO_int_compare_and_swap_write(addr, old, new_val)
+# define AO_HAVE_int_compare_and_swap
+#endif
+#if !defined(AO_HAVE_int_compare_and_swap) \
+ && defined(AO_HAVE_int_compare_and_swap_read)
+# define AO_int_compare_and_swap(addr, old, new_val) \
+ AO_int_compare_and_swap_read(addr, old, new_val)
+# define AO_HAVE_int_compare_and_swap
+#endif
+
+#if defined(AO_HAVE_int_compare_and_swap_acquire) \
+ && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_int_compare_and_swap_full)
+# define AO_int_compare_and_swap_full(addr, old, new_val) \
+ (AO_nop_full(), \
+ AO_int_compare_and_swap_acquire(addr, old, new_val))
+# define AO_HAVE_int_compare_and_swap_full
+#endif
+
+#if !defined(AO_HAVE_int_compare_and_swap_release_write) \
+ && defined(AO_HAVE_int_compare_and_swap_write)
+# define AO_int_compare_and_swap_release_write(addr, old, new_val) \
+ AO_int_compare_and_swap_write(addr, old, new_val)
+# define AO_HAVE_int_compare_and_swap_release_write
+#endif
+#if !defined(AO_HAVE_int_compare_and_swap_release_write) \
+ && defined(AO_HAVE_int_compare_and_swap_release)
+# define AO_int_compare_and_swap_release_write(addr, old, new_val) \
+ AO_int_compare_and_swap_release(addr, old, new_val)
+# define AO_HAVE_int_compare_and_swap_release_write
+#endif
+#if !defined(AO_HAVE_int_compare_and_swap_acquire_read) \
+ && defined(AO_HAVE_int_compare_and_swap_read)
+# define AO_int_compare_and_swap_acquire_read(addr, old, new_val) \
+ AO_int_compare_and_swap_read(addr, old, new_val)
+# define AO_HAVE_int_compare_and_swap_acquire_read
+#endif
+#if !defined(AO_HAVE_int_compare_and_swap_acquire_read) \
+ && defined(AO_HAVE_int_compare_and_swap_acquire)
+# define AO_int_compare_and_swap_acquire_read(addr, old, new_val) \
+ AO_int_compare_and_swap_acquire(addr, old, new_val)
+# define AO_HAVE_int_compare_and_swap_acquire_read
+#endif
+
+#ifdef AO_NO_DD_ORDERING
+# if defined(AO_HAVE_int_compare_and_swap_acquire_read)
+# define AO_int_compare_and_swap_dd_acquire_read(addr, old, new_val) \
+ AO_int_compare_and_swap_acquire_read(addr, old, new_val)
+# define AO_HAVE_int_compare_and_swap_dd_acquire_read
+# endif
+#else
+# if defined(AO_HAVE_int_compare_and_swap)
+# define AO_int_compare_and_swap_dd_acquire_read(addr, old, new_val) \
+ AO_int_compare_and_swap(addr, old, new_val)
+# define AO_HAVE_int_compare_and_swap_dd_acquire_read
+# endif
+#endif /* !AO_NO_DD_ORDERING */
+
+/* int_load */
+#if defined(AO_HAVE_int_load_full) && !defined(AO_HAVE_int_load_acquire)
+# define AO_int_load_acquire(addr) AO_int_load_full(addr)
+# define AO_HAVE_int_load_acquire
+#endif
+
+#if defined(AO_HAVE_int_load_acquire) && !defined(AO_HAVE_int_load)
+# define AO_int_load(addr) AO_int_load_acquire(addr)
+# define AO_HAVE_int_load
+#endif
+
+#if defined(AO_HAVE_int_load_full) && !defined(AO_HAVE_int_load_read)
+# define AO_int_load_read(addr) AO_int_load_full(addr)
+# define AO_HAVE_int_load_read
+#endif
+
+#if !defined(AO_HAVE_int_load_acquire_read) \
+ && defined(AO_HAVE_int_load_acquire)
+# define AO_int_load_acquire_read(addr) AO_int_load_acquire(addr)
+# define AO_HAVE_int_load_acquire_read
+#endif
+
+#if defined(AO_HAVE_int_load) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_int_load_acquire)
+ AO_INLINE unsigned
+ AO_int_load_acquire(const volatile unsigned *addr)
+ {
+ unsigned result = AO_int_load(addr);
+
+ /* Acquire barrier would be useless, since the load could be delayed */
+ /* beyond it. */
+ AO_nop_full();
+ return result;
+ }
+# define AO_HAVE_int_load_acquire
+#endif
+
+#if defined(AO_HAVE_int_load) && defined(AO_HAVE_nop_read) \
+ && !defined(AO_HAVE_int_load_read)
+ AO_INLINE unsigned
+ AO_int_load_read(const volatile unsigned *addr)
+ {
+ unsigned result = AO_int_load(addr);
+
+ AO_nop_read();
+ return result;
+ }
+# define AO_HAVE_int_load_read
+#endif
+
+#if defined(AO_HAVE_int_load_acquire) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_int_load_full)
+# define AO_int_load_full(addr) (AO_nop_full(), AO_int_load_acquire(addr))
+# define AO_HAVE_int_load_full
+#endif
+
+#if defined(AO_HAVE_int_compare_and_swap_read) \
+ && !defined(AO_HAVE_int_load_read)
+# define AO_int_CAS_BASED_LOAD_READ
+ AO_INLINE unsigned
+ AO_int_load_read(const volatile unsigned *addr)
+ {
+ unsigned result;
+
+ do {
+ result = *(const unsigned *)addr;
+ } while (AO_EXPECT_FALSE(!AO_int_compare_and_swap_read(
+ (volatile unsigned *)addr,
+ result, result)));
+ return result;
+ }
+# define AO_HAVE_int_load_read
+#endif
+
+#if !defined(AO_HAVE_int_load_acquire_read) \
+ && defined(AO_HAVE_int_load_read)
+# define AO_int_load_acquire_read(addr) AO_int_load_read(addr)
+# define AO_HAVE_int_load_acquire_read
+#endif
+
+#if defined(AO_HAVE_int_load_acquire_read) && !defined(AO_HAVE_int_load) \
+ && (!defined(AO_int_CAS_BASED_LOAD_READ) \
+ || !defined(AO_HAVE_int_compare_and_swap))
+# define AO_int_load(addr) AO_int_load_acquire_read(addr)
+# define AO_HAVE_int_load
+#endif
+
+#if defined(AO_HAVE_int_compare_and_swap_full) \
+ && !defined(AO_HAVE_int_load_full)
+ AO_INLINE unsigned
+ AO_int_load_full(const volatile unsigned *addr)
+ {
+ unsigned result;
+
+ do {
+ result = *(const unsigned *)addr;
+ } while (AO_EXPECT_FALSE(!AO_int_compare_and_swap_full(
+ (volatile unsigned *)addr,
+ result, result)));
+ return result;
+ }
+# define AO_HAVE_int_load_full
+#endif
+
+#if defined(AO_HAVE_int_compare_and_swap_acquire) \
+ && !defined(AO_HAVE_int_load_acquire)
+ AO_INLINE unsigned
+ AO_int_load_acquire(const volatile unsigned *addr)
+ {
+ unsigned result;
+
+ do {
+ result = *(const unsigned *)addr;
+ } while (AO_EXPECT_FALSE(!AO_int_compare_and_swap_acquire(
+ (volatile unsigned *)addr,
+ result, result)));
+ return result;
+ }
+# define AO_HAVE_int_load_acquire
+#endif
+
+#if defined(AO_HAVE_int_compare_and_swap) && !defined(AO_HAVE_int_load)
+ AO_INLINE unsigned
+ AO_int_load(const volatile unsigned *addr)
+ {
+ unsigned result;
+
+ do {
+ result = *(const unsigned *)addr;
+ } while (AO_EXPECT_FALSE(!AO_int_compare_and_swap(
+ (volatile unsigned *)addr,
+ result, result)));
+ return result;
+ }
+# define AO_HAVE_int_load
+#endif
+
+#ifdef AO_NO_DD_ORDERING
+# if defined(AO_HAVE_int_load_acquire_read)
+# define AO_int_load_dd_acquire_read(addr) \
+ AO_int_load_acquire_read(addr)
+# define AO_HAVE_int_load_dd_acquire_read
+# endif
+#else
+# if defined(AO_HAVE_int_load)
+# define AO_int_load_dd_acquire_read(addr) AO_int_load(addr)
+# define AO_HAVE_int_load_dd_acquire_read
+# endif
+#endif /* !AO_NO_DD_ORDERING */
+
+/* int_store */
+#if defined(AO_HAVE_int_store_full) && !defined(AO_HAVE_int_store_release)
+# define AO_int_store_release(addr, val) AO_int_store_full(addr, val)
+# define AO_HAVE_int_store_release
+#endif
+
+#if defined(AO_HAVE_int_store_release) && !defined(AO_HAVE_int_store)
+# define AO_int_store(addr, val) AO_int_store_release(addr, val)
+# define AO_HAVE_int_store
+#endif
+
+#if defined(AO_HAVE_int_store_full) && !defined(AO_HAVE_int_store_write)
+# define AO_int_store_write(addr, val) AO_int_store_full(addr, val)
+# define AO_HAVE_int_store_write
+#endif
+
+#if defined(AO_HAVE_int_store_release) \
+ && !defined(AO_HAVE_int_store_release_write)
+# define AO_int_store_release_write(addr, val) \
+ AO_int_store_release(addr, val)
+# define AO_HAVE_int_store_release_write
+#endif
+
+#if defined(AO_HAVE_int_store_write) && !defined(AO_HAVE_int_store)
+# define AO_int_store(addr, val) AO_int_store_write(addr, val)
+# define AO_HAVE_int_store
+#endif
+
+#if defined(AO_HAVE_int_store) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_int_store_release)
+# define AO_int_store_release(addr, val) \
+ (AO_nop_full(), AO_int_store(addr, val))
+# define AO_HAVE_int_store_release
+#endif
+
+#if defined(AO_HAVE_int_store) && defined(AO_HAVE_nop_write) \
+ && !defined(AO_HAVE_int_store_write)
+# define AO_int_store_write(addr, val) \
+ (AO_nop_write(), AO_int_store(addr, val))
+# define AO_HAVE_int_store_write
+#endif
+
+#if defined(AO_HAVE_int_compare_and_swap_write) \
+ && !defined(AO_HAVE_int_store_write)
+ AO_INLINE void
+ AO_int_store_write(volatile unsigned *addr, unsigned new_val)
+ {
+ unsigned old_val;
+
+ do {
+ old_val = *(unsigned *)addr;
+ } while (AO_EXPECT_FALSE(!AO_int_compare_and_swap_write(addr, old_val,
+ new_val)));
+ }
+# define AO_HAVE_int_store_write
+#endif
+
+#if defined(AO_HAVE_int_store_write) \
+ && !defined(AO_HAVE_int_store_release_write)
+# define AO_int_store_release_write(addr, val) \
+ AO_int_store_write(addr, val)
+# define AO_HAVE_int_store_release_write
+#endif
+
+#if defined(AO_HAVE_int_store_release) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_int_store_full)
+# define AO_int_store_full(addr, val) \
+ (AO_int_store_release(addr, val), \
+ AO_nop_full())
+# define AO_HAVE_int_store_full
+#endif
+
+#if defined(AO_HAVE_int_compare_and_swap) && !defined(AO_HAVE_int_store)
+ AO_INLINE void
+ AO_int_store(volatile unsigned *addr, unsigned new_val)
+ {
+ unsigned old_val;
+
+ do {
+ old_val = *(unsigned *)addr;
+ } while (AO_EXPECT_FALSE(!AO_int_compare_and_swap(addr,
+ old_val, new_val)));
+ }
+# define AO_HAVE_int_store
+#endif
+
+#if defined(AO_HAVE_int_compare_and_swap_release) \
+ && !defined(AO_HAVE_int_store_release)
+ AO_INLINE void
+ AO_int_store_release(volatile unsigned *addr, unsigned new_val)
+ {
+ unsigned old_val;
+
+ do {
+ old_val = *(unsigned *)addr;
+ } while (AO_EXPECT_FALSE(!AO_int_compare_and_swap_release(addr, old_val,
+ new_val)));
+ }
+# define AO_HAVE_int_store_release
+#endif
+
+#if defined(AO_HAVE_int_compare_and_swap_full) \
+ && !defined(AO_HAVE_int_store_full)
+ AO_INLINE void
+ AO_int_store_full(volatile unsigned *addr, unsigned new_val)
+ {
+ unsigned old_val;
+
+ do {
+ old_val = *(unsigned *)addr;
+ } while (AO_EXPECT_FALSE(!AO_int_compare_and_swap_full(addr, old_val,
+ new_val)));
+ }
+# define AO_HAVE_int_store_full
+#endif
+/*
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* fetch_compare_and_swap */
+#if defined(AO_HAVE_fetch_compare_and_swap) \
+ && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_fetch_compare_and_swap_acquire)
+ AO_INLINE AO_t
+ AO_fetch_compare_and_swap_acquire(volatile AO_t *addr,
+ AO_t old_val, AO_t new_val)
+ {
+ AO_t result = AO_fetch_compare_and_swap(addr, old_val, new_val);
+ AO_nop_full();
+ return result;
+ }
+# define AO_HAVE_fetch_compare_and_swap_acquire
+#endif
+#if defined(AO_HAVE_fetch_compare_and_swap) \
+ && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_fetch_compare_and_swap_release)
+# define AO_fetch_compare_and_swap_release(addr, old_val, new_val) \
+ (AO_nop_full(), \
+ AO_fetch_compare_and_swap(addr, old_val, new_val))
+# define AO_HAVE_fetch_compare_and_swap_release
+#endif
+#if defined(AO_HAVE_fetch_compare_and_swap_full)
+# if !defined(AO_HAVE_fetch_compare_and_swap_release)
+# define AO_fetch_compare_and_swap_release(addr, old_val, new_val) \
+ AO_fetch_compare_and_swap_full(addr, old_val, new_val)
+# define AO_HAVE_fetch_compare_and_swap_release
+# endif
+# if !defined(AO_HAVE_fetch_compare_and_swap_acquire)
+# define AO_fetch_compare_and_swap_acquire(addr, old_val, new_val) \
+ AO_fetch_compare_and_swap_full(addr, old_val, new_val)
+# define AO_HAVE_fetch_compare_and_swap_acquire
+# endif
+# if !defined(AO_HAVE_fetch_compare_and_swap_write)
+# define AO_fetch_compare_and_swap_write(addr, old_val, new_val) \
+ AO_fetch_compare_and_swap_full(addr, old_val, new_val)
+# define AO_HAVE_fetch_compare_and_swap_write
+# endif
+# if !defined(AO_HAVE_fetch_compare_and_swap_read)
+# define AO_fetch_compare_and_swap_read(addr, old_val, new_val) \
+ AO_fetch_compare_and_swap_full(addr, old_val, new_val)
+# define AO_HAVE_fetch_compare_and_swap_read
+# endif
+#endif /* AO_HAVE_fetch_compare_and_swap_full */
+
+/* Pick the strongest ordered variant available as the plain (unordered) */
+/* AO_fetch_compare_and_swap; preference order: release, acquire, write, */
+/* read. */
+#if !defined(AO_HAVE_fetch_compare_and_swap) \
+ && defined(AO_HAVE_fetch_compare_and_swap_release)
+# define AO_fetch_compare_and_swap(addr, old_val, new_val) \
+ AO_fetch_compare_and_swap_release(addr, old_val, new_val)
+# define AO_HAVE_fetch_compare_and_swap
+#endif
+#if !defined(AO_HAVE_fetch_compare_and_swap) \
+ && defined(AO_HAVE_fetch_compare_and_swap_acquire)
+# define AO_fetch_compare_and_swap(addr, old_val, new_val) \
+ AO_fetch_compare_and_swap_acquire(addr, old_val, new_val)
+# define AO_HAVE_fetch_compare_and_swap
+#endif
+#if !defined(AO_HAVE_fetch_compare_and_swap) \
+ && defined(AO_HAVE_fetch_compare_and_swap_write)
+# define AO_fetch_compare_and_swap(addr, old_val, new_val) \
+ AO_fetch_compare_and_swap_write(addr, old_val, new_val)
+# define AO_HAVE_fetch_compare_and_swap
+#endif
+#if !defined(AO_HAVE_fetch_compare_and_swap) \
+ && defined(AO_HAVE_fetch_compare_and_swap_read)
+# define AO_fetch_compare_and_swap(addr, old_val, new_val) \
+ AO_fetch_compare_and_swap_read(addr, old_val, new_val)
+# define AO_HAVE_fetch_compare_and_swap
+#endif
+
+/* A full barrier executed before an acquire CAS upgrades the combined */
+/* operation to full-ordering semantics. */
+#if defined(AO_HAVE_fetch_compare_and_swap_acquire) \
+ && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_fetch_compare_and_swap_full)
+# define AO_fetch_compare_and_swap_full(addr, old_val, new_val) \
+ (AO_nop_full(), \
+ AO_fetch_compare_and_swap_acquire(addr, old_val, new_val))
+# define AO_HAVE_fetch_compare_and_swap_full
+#endif
+
+/* _release_write prefers the weaker _write variant, falling back to */
+/* _release; _acquire_read likewise prefers _read over _acquire. */
+#if !defined(AO_HAVE_fetch_compare_and_swap_release_write) \
+ && defined(AO_HAVE_fetch_compare_and_swap_write)
+# define AO_fetch_compare_and_swap_release_write(addr,old_val,new_val) \
+ AO_fetch_compare_and_swap_write(addr, old_val, new_val)
+# define AO_HAVE_fetch_compare_and_swap_release_write
+#endif
+#if !defined(AO_HAVE_fetch_compare_and_swap_release_write) \
+ && defined(AO_HAVE_fetch_compare_and_swap_release)
+# define AO_fetch_compare_and_swap_release_write(addr,old_val,new_val) \
+ AO_fetch_compare_and_swap_release(addr, old_val, new_val)
+# define AO_HAVE_fetch_compare_and_swap_release_write
+#endif
+#if !defined(AO_HAVE_fetch_compare_and_swap_acquire_read) \
+ && defined(AO_HAVE_fetch_compare_and_swap_read)
+# define AO_fetch_compare_and_swap_acquire_read(addr,old_val,new_val) \
+ AO_fetch_compare_and_swap_read(addr, old_val, new_val)
+# define AO_HAVE_fetch_compare_and_swap_acquire_read
+#endif
+#if !defined(AO_HAVE_fetch_compare_and_swap_acquire_read) \
+ && defined(AO_HAVE_fetch_compare_and_swap_acquire)
+# define AO_fetch_compare_and_swap_acquire_read(addr,old_val,new_val) \
+ AO_fetch_compare_and_swap_acquire(addr, old_val, new_val)
+# define AO_HAVE_fetch_compare_and_swap_acquire_read
+#endif
+
+/* Unless AO_NO_DD_ORDERING is set, data-dependence ordering lets the */
+/* plain operation serve as the _dd_acquire_read variant. */
+#ifdef AO_NO_DD_ORDERING
+# if defined(AO_HAVE_fetch_compare_and_swap_acquire_read)
+# define AO_fetch_compare_and_swap_dd_acquire_read(addr,old_val,new_val) \
+ AO_fetch_compare_and_swap_acquire_read(addr, old_val, new_val)
+# define AO_HAVE_fetch_compare_and_swap_dd_acquire_read
+# endif
+#else
+# if defined(AO_HAVE_fetch_compare_and_swap)
+# define AO_fetch_compare_and_swap_dd_acquire_read(addr,old_val,new_val) \
+ AO_fetch_compare_and_swap(addr, old_val, new_val)
+# define AO_HAVE_fetch_compare_and_swap_dd_acquire_read
+# endif
+#endif /* !AO_NO_DD_ORDERING */
+
+/* compare_and_swap */
+/* Synthesize an acquire CAS from the plain CAS followed by a full */
+/* barrier; the barrier keeps later accesses from being reordered */
+/* before the CAS. */
+#if defined(AO_HAVE_compare_and_swap) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_compare_and_swap_acquire)
+ AO_INLINE int
+ AO_compare_and_swap_acquire(volatile AO_t *addr, AO_t old,
+ AO_t new_val)
+ {
+ int result = AO_compare_and_swap(addr, old, new_val);
+ AO_nop_full();
+ return result;
+ }
+# define AO_HAVE_compare_and_swap_acquire
+#endif
+/* Release CAS: full barrier issued before the plain CAS. */
+#if defined(AO_HAVE_compare_and_swap) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_compare_and_swap_release)
+# define AO_compare_and_swap_release(addr, old, new_val) \
+ (AO_nop_full(), AO_compare_and_swap(addr, old, new_val))
+# define AO_HAVE_compare_and_swap_release
+#endif
+/* A full CAS trivially provides every weaker ordering variant. */
+#if defined(AO_HAVE_compare_and_swap_full)
+# if !defined(AO_HAVE_compare_and_swap_release)
+# define AO_compare_and_swap_release(addr, old, new_val) \
+ AO_compare_and_swap_full(addr, old, new_val)
+# define AO_HAVE_compare_and_swap_release
+# endif
+# if !defined(AO_HAVE_compare_and_swap_acquire)
+# define AO_compare_and_swap_acquire(addr, old, new_val) \
+ AO_compare_and_swap_full(addr, old, new_val)
+# define AO_HAVE_compare_and_swap_acquire
+# endif
+# if !defined(AO_HAVE_compare_and_swap_write)
+# define AO_compare_and_swap_write(addr, old, new_val) \
+ AO_compare_and_swap_full(addr, old, new_val)
+# define AO_HAVE_compare_and_swap_write
+# endif
+# if !defined(AO_HAVE_compare_and_swap_read)
+# define AO_compare_and_swap_read(addr, old, new_val) \
+ AO_compare_and_swap_full(addr, old, new_val)
+# define AO_HAVE_compare_and_swap_read
+# endif
+#endif /* AO_HAVE_compare_and_swap_full */
+
+/* Pick the strongest ordered variant available as the plain CAS; */
+/* preference order: release, acquire, write, read. */
+#if !defined(AO_HAVE_compare_and_swap) \
+ && defined(AO_HAVE_compare_and_swap_release)
+# define AO_compare_and_swap(addr, old, new_val) \
+ AO_compare_and_swap_release(addr, old, new_val)
+# define AO_HAVE_compare_and_swap
+#endif
+#if !defined(AO_HAVE_compare_and_swap) \
+ && defined(AO_HAVE_compare_and_swap_acquire)
+# define AO_compare_and_swap(addr, old, new_val) \
+ AO_compare_and_swap_acquire(addr, old, new_val)
+# define AO_HAVE_compare_and_swap
+#endif
+#if !defined(AO_HAVE_compare_and_swap) \
+ && defined(AO_HAVE_compare_and_swap_write)
+# define AO_compare_and_swap(addr, old, new_val) \
+ AO_compare_and_swap_write(addr, old, new_val)
+# define AO_HAVE_compare_and_swap
+#endif
+#if !defined(AO_HAVE_compare_and_swap) \
+ && defined(AO_HAVE_compare_and_swap_read)
+# define AO_compare_and_swap(addr, old, new_val) \
+ AO_compare_and_swap_read(addr, old, new_val)
+# define AO_HAVE_compare_and_swap
+#endif
+
+/* Full barrier + acquire CAS yields a full CAS. */
+#if defined(AO_HAVE_compare_and_swap_acquire) \
+ && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_compare_and_swap_full)
+# define AO_compare_and_swap_full(addr, old, new_val) \
+ (AO_nop_full(), \
+ AO_compare_and_swap_acquire(addr, old, new_val))
+# define AO_HAVE_compare_and_swap_full
+#endif
+
+/* _release_write prefers _write over _release; _acquire_read prefers */
+/* _read over _acquire. */
+#if !defined(AO_HAVE_compare_and_swap_release_write) \
+ && defined(AO_HAVE_compare_and_swap_write)
+# define AO_compare_and_swap_release_write(addr, old, new_val) \
+ AO_compare_and_swap_write(addr, old, new_val)
+# define AO_HAVE_compare_and_swap_release_write
+#endif
+#if !defined(AO_HAVE_compare_and_swap_release_write) \
+ && defined(AO_HAVE_compare_and_swap_release)
+# define AO_compare_and_swap_release_write(addr, old, new_val) \
+ AO_compare_and_swap_release(addr, old, new_val)
+# define AO_HAVE_compare_and_swap_release_write
+#endif
+#if !defined(AO_HAVE_compare_and_swap_acquire_read) \
+ && defined(AO_HAVE_compare_and_swap_read)
+# define AO_compare_and_swap_acquire_read(addr, old, new_val) \
+ AO_compare_and_swap_read(addr, old, new_val)
+# define AO_HAVE_compare_and_swap_acquire_read
+#endif
+#if !defined(AO_HAVE_compare_and_swap_acquire_read) \
+ && defined(AO_HAVE_compare_and_swap_acquire)
+# define AO_compare_and_swap_acquire_read(addr, old, new_val) \
+ AO_compare_and_swap_acquire(addr, old, new_val)
+# define AO_HAVE_compare_and_swap_acquire_read
+#endif
+
+/* Unless AO_NO_DD_ORDERING is set, the plain CAS already serves as */
+/* the _dd_acquire_read variant. */
+#ifdef AO_NO_DD_ORDERING
+# if defined(AO_HAVE_compare_and_swap_acquire_read)
+# define AO_compare_and_swap_dd_acquire_read(addr, old, new_val) \
+ AO_compare_and_swap_acquire_read(addr, old, new_val)
+# define AO_HAVE_compare_and_swap_dd_acquire_read
+# endif
+#else
+# if defined(AO_HAVE_compare_and_swap)
+# define AO_compare_and_swap_dd_acquire_read(addr, old, new_val) \
+ AO_compare_and_swap(addr, old, new_val)
+# define AO_HAVE_compare_and_swap_dd_acquire_read
+# endif
+#endif /* !AO_NO_DD_ORDERING */
+
+/* load */
+/* Derive weaker load variants from stronger ones (full implies */
+/* acquire and read; acquire implies plain and acquire_read). */
+#if defined(AO_HAVE_load_full) && !defined(AO_HAVE_load_acquire)
+# define AO_load_acquire(addr) AO_load_full(addr)
+# define AO_HAVE_load_acquire
+#endif
+
+#if defined(AO_HAVE_load_acquire) && !defined(AO_HAVE_load)
+# define AO_load(addr) AO_load_acquire(addr)
+# define AO_HAVE_load
+#endif
+
+#if defined(AO_HAVE_load_full) && !defined(AO_HAVE_load_read)
+# define AO_load_read(addr) AO_load_full(addr)
+# define AO_HAVE_load_read
+#endif
+
+#if !defined(AO_HAVE_load_acquire_read) \
+ && defined(AO_HAVE_load_acquire)
+# define AO_load_acquire_read(addr) AO_load_acquire(addr)
+# define AO_HAVE_load_acquire_read
+#endif
+
+/* Synthesize an acquire load from a plain load plus a full barrier */
+/* placed after the load. */
+#if defined(AO_HAVE_load) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_load_acquire)
+ AO_INLINE AO_t
+ AO_load_acquire(const volatile AO_t *addr)
+ {
+ AO_t result = AO_load(addr);
+
+ /* Acquire barrier would be useless, since the load could be delayed */
+ /* beyond it. */
+ AO_nop_full();
+ return result;
+ }
+# define AO_HAVE_load_acquire
+#endif
+
+/* Read-ordered load: plain load followed by a read barrier. */
+#if defined(AO_HAVE_load) && defined(AO_HAVE_nop_read) \
+ && !defined(AO_HAVE_load_read)
+ AO_INLINE AO_t
+ AO_load_read(const volatile AO_t *addr)
+ {
+ AO_t result = AO_load(addr);
+
+ AO_nop_read();
+ return result;
+ }
+# define AO_HAVE_load_read
+#endif
+
+#if defined(AO_HAVE_load_acquire) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_load_full)
+# define AO_load_full(addr) (AO_nop_full(), AO_load_acquire(addr))
+# define AO_HAVE_load_full
+#endif
+
+/* Emulate an atomic load with a CAS of the value against itself: the */
+/* CAS succeeds only if the location still holds the value just read, */
+/* so the returned value was present atomically. */
+#if defined(AO_HAVE_compare_and_swap_read) \
+ && !defined(AO_HAVE_load_read)
+# define AO_CAS_BASED_LOAD_READ
+ AO_INLINE AO_t
+ AO_load_read(const volatile AO_t *addr)
+ {
+ AO_t result;
+
+ do {
+ result = *(const AO_t *)addr;
+ } while (AO_EXPECT_FALSE(!AO_compare_and_swap_read(
+ (volatile AO_t *)addr,
+ result, result)));
+ return result;
+ }
+# define AO_HAVE_load_read
+#endif
+
+#if !defined(AO_HAVE_load_acquire_read) \
+ && defined(AO_HAVE_load_read)
+# define AO_load_acquire_read(addr) AO_load_read(addr)
+# define AO_HAVE_load_acquire_read
+#endif
+
+/* Use acquire_read as the plain load, except when that would route a */
+/* CAS-based load through a missing plain CAS. */
+#if defined(AO_HAVE_load_acquire_read) && !defined(AO_HAVE_load) \
+ && (!defined(AO_CAS_BASED_LOAD_READ) \
+ || !defined(AO_HAVE_compare_and_swap))
+# define AO_load(addr) AO_load_acquire_read(addr)
+# define AO_HAVE_load
+#endif
+
+/* CAS-based emulations of full, acquire and plain loads (same */
+/* self-CAS technique as AO_load_read above). */
+#if defined(AO_HAVE_compare_and_swap_full) \
+ && !defined(AO_HAVE_load_full)
+ AO_INLINE AO_t
+ AO_load_full(const volatile AO_t *addr)
+ {
+ AO_t result;
+
+ do {
+ result = *(const AO_t *)addr;
+ } while (AO_EXPECT_FALSE(!AO_compare_and_swap_full(
+ (volatile AO_t *)addr,
+ result, result)));
+ return result;
+ }
+# define AO_HAVE_load_full
+#endif
+
+#if defined(AO_HAVE_compare_and_swap_acquire) \
+ && !defined(AO_HAVE_load_acquire)
+ AO_INLINE AO_t
+ AO_load_acquire(const volatile AO_t *addr)
+ {
+ AO_t result;
+
+ do {
+ result = *(const AO_t *)addr;
+ } while (AO_EXPECT_FALSE(!AO_compare_and_swap_acquire(
+ (volatile AO_t *)addr,
+ result, result)));
+ return result;
+ }
+# define AO_HAVE_load_acquire
+#endif
+
+#if defined(AO_HAVE_compare_and_swap) && !defined(AO_HAVE_load)
+ AO_INLINE AO_t
+ AO_load(const volatile AO_t *addr)
+ {
+ AO_t result;
+
+ do {
+ result = *(const AO_t *)addr;
+ } while (AO_EXPECT_FALSE(!AO_compare_and_swap(
+ (volatile AO_t *)addr,
+ result, result)));
+ return result;
+ }
+# define AO_HAVE_load
+#endif
+
+/* Unless AO_NO_DD_ORDERING is set, the plain load already serves as */
+/* the _dd_acquire_read variant. */
+#ifdef AO_NO_DD_ORDERING
+# if defined(AO_HAVE_load_acquire_read)
+# define AO_load_dd_acquire_read(addr) \
+ AO_load_acquire_read(addr)
+# define AO_HAVE_load_dd_acquire_read
+# endif
+#else
+# if defined(AO_HAVE_load)
+# define AO_load_dd_acquire_read(addr) AO_load(addr)
+# define AO_HAVE_load_dd_acquire_read
+# endif
+#endif /* !AO_NO_DD_ORDERING */
+
+/* store */
+/* Derive weaker store variants from stronger ones (full implies */
+/* release and write; release/write imply plain). */
+#if defined(AO_HAVE_store_full) && !defined(AO_HAVE_store_release)
+# define AO_store_release(addr, val) AO_store_full(addr, val)
+# define AO_HAVE_store_release
+#endif
+
+#if defined(AO_HAVE_store_release) && !defined(AO_HAVE_store)
+# define AO_store(addr, val) AO_store_release(addr, val)
+# define AO_HAVE_store
+#endif
+
+#if defined(AO_HAVE_store_full) && !defined(AO_HAVE_store_write)
+# define AO_store_write(addr, val) AO_store_full(addr, val)
+# define AO_HAVE_store_write
+#endif
+
+#if defined(AO_HAVE_store_release) \
+ && !defined(AO_HAVE_store_release_write)
+# define AO_store_release_write(addr, val) \
+ AO_store_release(addr, val)
+# define AO_HAVE_store_release_write
+#endif
+
+#if defined(AO_HAVE_store_write) && !defined(AO_HAVE_store)
+# define AO_store(addr, val) AO_store_write(addr, val)
+# define AO_HAVE_store
+#endif
+
+/* Release store: full barrier before the plain store; write-ordered */
+/* store: write barrier before the plain store. */
+#if defined(AO_HAVE_store) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_store_release)
+# define AO_store_release(addr, val) \
+ (AO_nop_full(), AO_store(addr, val))
+# define AO_HAVE_store_release
+#endif
+
+#if defined(AO_HAVE_store) && defined(AO_HAVE_nop_write) \
+ && !defined(AO_HAVE_store_write)
+# define AO_store_write(addr, val) \
+ (AO_nop_write(), AO_store(addr, val))
+# define AO_HAVE_store_write
+#endif
+
+/* Emulate an atomic store by retrying a CAS from the current value */
+/* until it succeeds. */
+#if defined(AO_HAVE_compare_and_swap_write) \
+ && !defined(AO_HAVE_store_write)
+ AO_INLINE void
+ AO_store_write(volatile AO_t *addr, AO_t new_val)
+ {
+ AO_t old_val;
+
+ do {
+ old_val = *(AO_t *)addr;
+ } while (AO_EXPECT_FALSE(!AO_compare_and_swap_write(addr, old_val,
+ new_val)));
+ }
+# define AO_HAVE_store_write
+#endif
+
+#if defined(AO_HAVE_store_write) \
+ && !defined(AO_HAVE_store_release_write)
+# define AO_store_release_write(addr, val) \
+ AO_store_write(addr, val)
+# define AO_HAVE_store_release_write
+#endif
+
+/* Release store + trailing full barrier yields a full store. */
+#if defined(AO_HAVE_store_release) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_store_full)
+# define AO_store_full(addr, val) \
+ (AO_store_release(addr, val), \
+ AO_nop_full())
+# define AO_HAVE_store_full
+#endif
+
+/* CAS-based emulations of plain, release and full stores (same */
+/* retry-until-CAS-succeeds technique as AO_store_write above). */
+#if defined(AO_HAVE_compare_and_swap) && !defined(AO_HAVE_store)
+ AO_INLINE void
+ AO_store(volatile AO_t *addr, AO_t new_val)
+ {
+ AO_t old_val;
+
+ do {
+ old_val = *(AO_t *)addr;
+ } while (AO_EXPECT_FALSE(!AO_compare_and_swap(addr,
+ old_val, new_val)));
+ }
+# define AO_HAVE_store
+#endif
+
+#if defined(AO_HAVE_compare_and_swap_release) \
+ && !defined(AO_HAVE_store_release)
+ AO_INLINE void
+ AO_store_release(volatile AO_t *addr, AO_t new_val)
+ {
+ AO_t old_val;
+
+ do {
+ old_val = *(AO_t *)addr;
+ } while (AO_EXPECT_FALSE(!AO_compare_and_swap_release(addr, old_val,
+ new_val)));
+ }
+# define AO_HAVE_store_release
+#endif
+
+#if defined(AO_HAVE_compare_and_swap_full) \
+ && !defined(AO_HAVE_store_full)
+ AO_INLINE void
+ AO_store_full(volatile AO_t *addr, AO_t new_val)
+ {
+ AO_t old_val;
+
+ do {
+ old_val = *(AO_t *)addr;
+ } while (AO_EXPECT_FALSE(!AO_compare_and_swap_full(addr, old_val,
+ new_val)));
+ }
+# define AO_HAVE_store_full
+#endif
+/*
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* double_fetch_compare_and_swap */
+/* AO_double_t analogue of the AO_fetch_compare_and_swap */
+/* generalization above: derive ordered variants via barriers or from */
+/* the _full variant, then pick the strongest as the plain operation. */
+#if defined(AO_HAVE_double_fetch_compare_and_swap) \
+ && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_double_fetch_compare_and_swap_acquire)
+ AO_INLINE AO_double_t
+ AO_double_fetch_compare_and_swap_acquire(volatile AO_double_t *addr,
+ AO_double_t old_val, AO_double_t new_val)
+ {
+ AO_double_t result = AO_double_fetch_compare_and_swap(addr, old_val, new_val);
+ AO_nop_full();
+ return result;
+ }
+# define AO_HAVE_double_fetch_compare_and_swap_acquire
+#endif
+#if defined(AO_HAVE_double_fetch_compare_and_swap) \
+ && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_double_fetch_compare_and_swap_release)
+# define AO_double_fetch_compare_and_swap_release(addr, old_val, new_val) \
+ (AO_nop_full(), \
+ AO_double_fetch_compare_and_swap(addr, old_val, new_val))
+# define AO_HAVE_double_fetch_compare_and_swap_release
+#endif
+#if defined(AO_HAVE_double_fetch_compare_and_swap_full)
+# if !defined(AO_HAVE_double_fetch_compare_and_swap_release)
+# define AO_double_fetch_compare_and_swap_release(addr, old_val, new_val) \
+ AO_double_fetch_compare_and_swap_full(addr, old_val, new_val)
+# define AO_HAVE_double_fetch_compare_and_swap_release
+# endif
+# if !defined(AO_HAVE_double_fetch_compare_and_swap_acquire)
+# define AO_double_fetch_compare_and_swap_acquire(addr, old_val, new_val) \
+ AO_double_fetch_compare_and_swap_full(addr, old_val, new_val)
+# define AO_HAVE_double_fetch_compare_and_swap_acquire
+# endif
+# if !defined(AO_HAVE_double_fetch_compare_and_swap_write)
+# define AO_double_fetch_compare_and_swap_write(addr, old_val, new_val) \
+ AO_double_fetch_compare_and_swap_full(addr, old_val, new_val)
+# define AO_HAVE_double_fetch_compare_and_swap_write
+# endif
+# if !defined(AO_HAVE_double_fetch_compare_and_swap_read)
+# define AO_double_fetch_compare_and_swap_read(addr, old_val, new_val) \
+ AO_double_fetch_compare_and_swap_full(addr, old_val, new_val)
+# define AO_HAVE_double_fetch_compare_and_swap_read
+# endif
+#endif /* AO_HAVE_double_fetch_compare_and_swap_full */
+
+#if !defined(AO_HAVE_double_fetch_compare_and_swap) \
+ && defined(AO_HAVE_double_fetch_compare_and_swap_release)
+# define AO_double_fetch_compare_and_swap(addr, old_val, new_val) \
+ AO_double_fetch_compare_and_swap_release(addr, old_val, new_val)
+# define AO_HAVE_double_fetch_compare_and_swap
+#endif
+#if !defined(AO_HAVE_double_fetch_compare_and_swap) \
+ && defined(AO_HAVE_double_fetch_compare_and_swap_acquire)
+# define AO_double_fetch_compare_and_swap(addr, old_val, new_val) \
+ AO_double_fetch_compare_and_swap_acquire(addr, old_val, new_val)
+# define AO_HAVE_double_fetch_compare_and_swap
+#endif
+#if !defined(AO_HAVE_double_fetch_compare_and_swap) \
+ && defined(AO_HAVE_double_fetch_compare_and_swap_write)
+# define AO_double_fetch_compare_and_swap(addr, old_val, new_val) \
+ AO_double_fetch_compare_and_swap_write(addr, old_val, new_val)
+# define AO_HAVE_double_fetch_compare_and_swap
+#endif
+#if !defined(AO_HAVE_double_fetch_compare_and_swap) \
+ && defined(AO_HAVE_double_fetch_compare_and_swap_read)
+# define AO_double_fetch_compare_and_swap(addr, old_val, new_val) \
+ AO_double_fetch_compare_and_swap_read(addr, old_val, new_val)
+# define AO_HAVE_double_fetch_compare_and_swap
+#endif
+
+#if defined(AO_HAVE_double_fetch_compare_and_swap_acquire) \
+ && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_double_fetch_compare_and_swap_full)
+# define AO_double_fetch_compare_and_swap_full(addr, old_val, new_val) \
+ (AO_nop_full(), \
+ AO_double_fetch_compare_and_swap_acquire(addr, old_val, new_val))
+# define AO_HAVE_double_fetch_compare_and_swap_full
+#endif
+
+#if !defined(AO_HAVE_double_fetch_compare_and_swap_release_write) \
+ && defined(AO_HAVE_double_fetch_compare_and_swap_write)
+# define AO_double_fetch_compare_and_swap_release_write(addr,old_val,new_val) \
+ AO_double_fetch_compare_and_swap_write(addr, old_val, new_val)
+# define AO_HAVE_double_fetch_compare_and_swap_release_write
+#endif
+#if !defined(AO_HAVE_double_fetch_compare_and_swap_release_write) \
+ && defined(AO_HAVE_double_fetch_compare_and_swap_release)
+# define AO_double_fetch_compare_and_swap_release_write(addr,old_val,new_val) \
+ AO_double_fetch_compare_and_swap_release(addr, old_val, new_val)
+# define AO_HAVE_double_fetch_compare_and_swap_release_write
+#endif
+#if !defined(AO_HAVE_double_fetch_compare_and_swap_acquire_read) \
+ && defined(AO_HAVE_double_fetch_compare_and_swap_read)
+# define AO_double_fetch_compare_and_swap_acquire_read(addr,old_val,new_val) \
+ AO_double_fetch_compare_and_swap_read(addr, old_val, new_val)
+# define AO_HAVE_double_fetch_compare_and_swap_acquire_read
+#endif
+#if !defined(AO_HAVE_double_fetch_compare_and_swap_acquire_read) \
+ && defined(AO_HAVE_double_fetch_compare_and_swap_acquire)
+# define AO_double_fetch_compare_and_swap_acquire_read(addr,old_val,new_val) \
+ AO_double_fetch_compare_and_swap_acquire(addr, old_val, new_val)
+# define AO_HAVE_double_fetch_compare_and_swap_acquire_read
+#endif
+
+#ifdef AO_NO_DD_ORDERING
+# if defined(AO_HAVE_double_fetch_compare_and_swap_acquire_read)
+# define AO_double_fetch_compare_and_swap_dd_acquire_read(addr,old_val,new_val) \
+ AO_double_fetch_compare_and_swap_acquire_read(addr, old_val, new_val)
+# define AO_HAVE_double_fetch_compare_and_swap_dd_acquire_read
+# endif
+#else
+# if defined(AO_HAVE_double_fetch_compare_and_swap)
+# define AO_double_fetch_compare_and_swap_dd_acquire_read(addr,old_val,new_val) \
+ AO_double_fetch_compare_and_swap(addr, old_val, new_val)
+# define AO_HAVE_double_fetch_compare_and_swap_dd_acquire_read
+# endif
+#endif /* !AO_NO_DD_ORDERING */
+
+/* double_compare_and_swap */
+/* AO_double_t analogue of the AO_compare_and_swap generalization */
+/* above (same derivation rules, double-width operands). */
+#if defined(AO_HAVE_double_compare_and_swap) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_double_compare_and_swap_acquire)
+ AO_INLINE int
+ AO_double_compare_and_swap_acquire(volatile AO_double_t *addr, AO_double_t old,
+ AO_double_t new_val)
+ {
+ int result = AO_double_compare_and_swap(addr, old, new_val);
+ AO_nop_full();
+ return result;
+ }
+# define AO_HAVE_double_compare_and_swap_acquire
+#endif
+#if defined(AO_HAVE_double_compare_and_swap) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_double_compare_and_swap_release)
+# define AO_double_compare_and_swap_release(addr, old, new_val) \
+ (AO_nop_full(), AO_double_compare_and_swap(addr, old, new_val))
+# define AO_HAVE_double_compare_and_swap_release
+#endif
+#if defined(AO_HAVE_double_compare_and_swap_full)
+# if !defined(AO_HAVE_double_compare_and_swap_release)
+# define AO_double_compare_and_swap_release(addr, old, new_val) \
+ AO_double_compare_and_swap_full(addr, old, new_val)
+# define AO_HAVE_double_compare_and_swap_release
+# endif
+# if !defined(AO_HAVE_double_compare_and_swap_acquire)
+# define AO_double_compare_and_swap_acquire(addr, old, new_val) \
+ AO_double_compare_and_swap_full(addr, old, new_val)
+# define AO_HAVE_double_compare_and_swap_acquire
+# endif
+# if !defined(AO_HAVE_double_compare_and_swap_write)
+# define AO_double_compare_and_swap_write(addr, old, new_val) \
+ AO_double_compare_and_swap_full(addr, old, new_val)
+# define AO_HAVE_double_compare_and_swap_write
+# endif
+# if !defined(AO_HAVE_double_compare_and_swap_read)
+# define AO_double_compare_and_swap_read(addr, old, new_val) \
+ AO_double_compare_and_swap_full(addr, old, new_val)
+# define AO_HAVE_double_compare_and_swap_read
+# endif
+#endif /* AO_HAVE_double_compare_and_swap_full */
+
+#if !defined(AO_HAVE_double_compare_and_swap) \
+ && defined(AO_HAVE_double_compare_and_swap_release)
+# define AO_double_compare_and_swap(addr, old, new_val) \
+ AO_double_compare_and_swap_release(addr, old, new_val)
+# define AO_HAVE_double_compare_and_swap
+#endif
+#if !defined(AO_HAVE_double_compare_and_swap) \
+ && defined(AO_HAVE_double_compare_and_swap_acquire)
+# define AO_double_compare_and_swap(addr, old, new_val) \
+ AO_double_compare_and_swap_acquire(addr, old, new_val)
+# define AO_HAVE_double_compare_and_swap
+#endif
+#if !defined(AO_HAVE_double_compare_and_swap) \
+ && defined(AO_HAVE_double_compare_and_swap_write)
+# define AO_double_compare_and_swap(addr, old, new_val) \
+ AO_double_compare_and_swap_write(addr, old, new_val)
+# define AO_HAVE_double_compare_and_swap
+#endif
+#if !defined(AO_HAVE_double_compare_and_swap) \
+ && defined(AO_HAVE_double_compare_and_swap_read)
+# define AO_double_compare_and_swap(addr, old, new_val) \
+ AO_double_compare_and_swap_read(addr, old, new_val)
+# define AO_HAVE_double_compare_and_swap
+#endif
+
+#if defined(AO_HAVE_double_compare_and_swap_acquire) \
+ && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_double_compare_and_swap_full)
+# define AO_double_compare_and_swap_full(addr, old, new_val) \
+ (AO_nop_full(), \
+ AO_double_compare_and_swap_acquire(addr, old, new_val))
+# define AO_HAVE_double_compare_and_swap_full
+#endif
+
+#if !defined(AO_HAVE_double_compare_and_swap_release_write) \
+ && defined(AO_HAVE_double_compare_and_swap_write)
+# define AO_double_compare_and_swap_release_write(addr, old, new_val) \
+ AO_double_compare_and_swap_write(addr, old, new_val)
+# define AO_HAVE_double_compare_and_swap_release_write
+#endif
+#if !defined(AO_HAVE_double_compare_and_swap_release_write) \
+ && defined(AO_HAVE_double_compare_and_swap_release)
+# define AO_double_compare_and_swap_release_write(addr, old, new_val) \
+ AO_double_compare_and_swap_release(addr, old, new_val)
+# define AO_HAVE_double_compare_and_swap_release_write
+#endif
+#if !defined(AO_HAVE_double_compare_and_swap_acquire_read) \
+ && defined(AO_HAVE_double_compare_and_swap_read)
+# define AO_double_compare_and_swap_acquire_read(addr, old, new_val) \
+ AO_double_compare_and_swap_read(addr, old, new_val)
+# define AO_HAVE_double_compare_and_swap_acquire_read
+#endif
+#if !defined(AO_HAVE_double_compare_and_swap_acquire_read) \
+ && defined(AO_HAVE_double_compare_and_swap_acquire)
+# define AO_double_compare_and_swap_acquire_read(addr, old, new_val) \
+ AO_double_compare_and_swap_acquire(addr, old, new_val)
+# define AO_HAVE_double_compare_and_swap_acquire_read
+#endif
+
+#ifdef AO_NO_DD_ORDERING
+# if defined(AO_HAVE_double_compare_and_swap_acquire_read)
+# define AO_double_compare_and_swap_dd_acquire_read(addr, old, new_val) \
+ AO_double_compare_and_swap_acquire_read(addr, old, new_val)
+# define AO_HAVE_double_compare_and_swap_dd_acquire_read
+# endif
+#else
+# if defined(AO_HAVE_double_compare_and_swap)
+# define AO_double_compare_and_swap_dd_acquire_read(addr, old, new_val) \
+ AO_double_compare_and_swap(addr, old, new_val)
+# define AO_HAVE_double_compare_and_swap_dd_acquire_read
+# endif
+#endif /* !AO_NO_DD_ORDERING */
+
+/* double_load */
+/* AO_double_t analogue of the AO_load generalization above: derive */
+/* weaker variants from stronger ones, add barriers, or emulate loads */
+/* via a self-CAS of the value just read. */
+#if defined(AO_HAVE_double_load_full) && !defined(AO_HAVE_double_load_acquire)
+# define AO_double_load_acquire(addr) AO_double_load_full(addr)
+# define AO_HAVE_double_load_acquire
+#endif
+
+#if defined(AO_HAVE_double_load_acquire) && !defined(AO_HAVE_double_load)
+# define AO_double_load(addr) AO_double_load_acquire(addr)
+# define AO_HAVE_double_load
+#endif
+
+#if defined(AO_HAVE_double_load_full) && !defined(AO_HAVE_double_load_read)
+# define AO_double_load_read(addr) AO_double_load_full(addr)
+# define AO_HAVE_double_load_read
+#endif
+
+#if !defined(AO_HAVE_double_load_acquire_read) \
+ && defined(AO_HAVE_double_load_acquire)
+# define AO_double_load_acquire_read(addr) AO_double_load_acquire(addr)
+# define AO_HAVE_double_load_acquire_read
+#endif
+
+#if defined(AO_HAVE_double_load) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_double_load_acquire)
+ AO_INLINE AO_double_t
+ AO_double_load_acquire(const volatile AO_double_t *addr)
+ {
+ AO_double_t result = AO_double_load(addr);
+
+ /* Acquire barrier would be useless, since the load could be delayed */
+ /* beyond it. */
+ AO_nop_full();
+ return result;
+ }
+# define AO_HAVE_double_load_acquire
+#endif
+
+#if defined(AO_HAVE_double_load) && defined(AO_HAVE_nop_read) \
+ && !defined(AO_HAVE_double_load_read)
+ AO_INLINE AO_double_t
+ AO_double_load_read(const volatile AO_double_t *addr)
+ {
+ AO_double_t result = AO_double_load(addr);
+
+ AO_nop_read();
+ return result;
+ }
+# define AO_HAVE_double_load_read
+#endif
+
+#if defined(AO_HAVE_double_load_acquire) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_double_load_full)
+# define AO_double_load_full(addr) (AO_nop_full(), AO_double_load_acquire(addr))
+# define AO_HAVE_double_load_full
+#endif
+
+/* Emulated load: the CAS succeeds only if the location still holds */
+/* the double-width value just read. */
+#if defined(AO_HAVE_double_compare_and_swap_read) \
+ && !defined(AO_HAVE_double_load_read)
+# define AO_double_CAS_BASED_LOAD_READ
+ AO_INLINE AO_double_t
+ AO_double_load_read(const volatile AO_double_t *addr)
+ {
+ AO_double_t result;
+
+ do {
+ result = *(const AO_double_t *)addr;
+ } while (AO_EXPECT_FALSE(!AO_double_compare_and_swap_read(
+ (volatile AO_double_t *)addr,
+ result, result)));
+ return result;
+ }
+# define AO_HAVE_double_load_read
+#endif
+
+#if !defined(AO_HAVE_double_load_acquire_read) \
+ && defined(AO_HAVE_double_load_read)
+# define AO_double_load_acquire_read(addr) AO_double_load_read(addr)
+# define AO_HAVE_double_load_acquire_read
+#endif
+
+#if defined(AO_HAVE_double_load_acquire_read) && !defined(AO_HAVE_double_load) \
+ && (!defined(AO_double_CAS_BASED_LOAD_READ) \
+ || !defined(AO_HAVE_double_compare_and_swap))
+# define AO_double_load(addr) AO_double_load_acquire_read(addr)
+# define AO_HAVE_double_load
+#endif
+
+#if defined(AO_HAVE_double_compare_and_swap_full) \
+ && !defined(AO_HAVE_double_load_full)
+ AO_INLINE AO_double_t
+ AO_double_load_full(const volatile AO_double_t *addr)
+ {
+ AO_double_t result;
+
+ do {
+ result = *(const AO_double_t *)addr;
+ } while (AO_EXPECT_FALSE(!AO_double_compare_and_swap_full(
+ (volatile AO_double_t *)addr,
+ result, result)));
+ return result;
+ }
+# define AO_HAVE_double_load_full
+#endif
+
+#if defined(AO_HAVE_double_compare_and_swap_acquire) \
+ && !defined(AO_HAVE_double_load_acquire)
+ AO_INLINE AO_double_t
+ AO_double_load_acquire(const volatile AO_double_t *addr)
+ {
+ AO_double_t result;
+
+ do {
+ result = *(const AO_double_t *)addr;
+ } while (AO_EXPECT_FALSE(!AO_double_compare_and_swap_acquire(
+ (volatile AO_double_t *)addr,
+ result, result)));
+ return result;
+ }
+# define AO_HAVE_double_load_acquire
+#endif
+
+#if defined(AO_HAVE_double_compare_and_swap) && !defined(AO_HAVE_double_load)
+ AO_INLINE AO_double_t
+ AO_double_load(const volatile AO_double_t *addr)
+ {
+ AO_double_t result;
+
+ do {
+ result = *(const AO_double_t *)addr;
+ } while (AO_EXPECT_FALSE(!AO_double_compare_and_swap(
+ (volatile AO_double_t *)addr,
+ result, result)));
+ return result;
+ }
+# define AO_HAVE_double_load
+#endif
+
+#ifdef AO_NO_DD_ORDERING
+# if defined(AO_HAVE_double_load_acquire_read)
+# define AO_double_load_dd_acquire_read(addr) \
+ AO_double_load_acquire_read(addr)
+# define AO_HAVE_double_load_dd_acquire_read
+# endif
+#else
+# if defined(AO_HAVE_double_load)
+# define AO_double_load_dd_acquire_read(addr) AO_double_load(addr)
+# define AO_HAVE_double_load_dd_acquire_read
+# endif
+#endif /* !AO_NO_DD_ORDERING */
+
+/* double_store */
+/* AO_double_t analogue of the AO_store generalization above: derive */
+/* weaker variants from stronger ones, add barriers, or emulate the */
+/* store by retrying a CAS from the current value until it succeeds. */
+#if defined(AO_HAVE_double_store_full) && !defined(AO_HAVE_double_store_release)
+# define AO_double_store_release(addr, val) AO_double_store_full(addr, val)
+# define AO_HAVE_double_store_release
+#endif
+
+#if defined(AO_HAVE_double_store_release) && !defined(AO_HAVE_double_store)
+# define AO_double_store(addr, val) AO_double_store_release(addr, val)
+# define AO_HAVE_double_store
+#endif
+
+#if defined(AO_HAVE_double_store_full) && !defined(AO_HAVE_double_store_write)
+# define AO_double_store_write(addr, val) AO_double_store_full(addr, val)
+# define AO_HAVE_double_store_write
+#endif
+
+#if defined(AO_HAVE_double_store_release) \
+ && !defined(AO_HAVE_double_store_release_write)
+# define AO_double_store_release_write(addr, val) \
+ AO_double_store_release(addr, val)
+# define AO_HAVE_double_store_release_write
+#endif
+
+#if defined(AO_HAVE_double_store_write) && !defined(AO_HAVE_double_store)
+# define AO_double_store(addr, val) AO_double_store_write(addr, val)
+# define AO_HAVE_double_store
+#endif
+
+#if defined(AO_HAVE_double_store) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_double_store_release)
+# define AO_double_store_release(addr, val) \
+ (AO_nop_full(), AO_double_store(addr, val))
+# define AO_HAVE_double_store_release
+#endif
+
+#if defined(AO_HAVE_double_store) && defined(AO_HAVE_nop_write) \
+ && !defined(AO_HAVE_double_store_write)
+# define AO_double_store_write(addr, val) \
+ (AO_nop_write(), AO_double_store(addr, val))
+# define AO_HAVE_double_store_write
+#endif
+
+#if defined(AO_HAVE_double_compare_and_swap_write) \
+ && !defined(AO_HAVE_double_store_write)
+ AO_INLINE void
+ AO_double_store_write(volatile AO_double_t *addr, AO_double_t new_val)
+ {
+ AO_double_t old_val;
+
+ do {
+ old_val = *(AO_double_t *)addr;
+ } while (AO_EXPECT_FALSE(!AO_double_compare_and_swap_write(addr, old_val,
+ new_val)));
+ }
+# define AO_HAVE_double_store_write
+#endif
+
+#if defined(AO_HAVE_double_store_write) \
+ && !defined(AO_HAVE_double_store_release_write)
+# define AO_double_store_release_write(addr, val) \
+ AO_double_store_write(addr, val)
+# define AO_HAVE_double_store_release_write
+#endif
+
+#if defined(AO_HAVE_double_store_release) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_double_store_full)
+# define AO_double_store_full(addr, val) \
+ (AO_double_store_release(addr, val), \
+ AO_nop_full())
+# define AO_HAVE_double_store_full
+#endif
+
+#if defined(AO_HAVE_double_compare_and_swap) && !defined(AO_HAVE_double_store)
+ AO_INLINE void
+ AO_double_store(volatile AO_double_t *addr, AO_double_t new_val)
+ {
+ AO_double_t old_val;
+
+ do {
+ old_val = *(AO_double_t *)addr;
+ } while (AO_EXPECT_FALSE(!AO_double_compare_and_swap(addr,
+ old_val, new_val)));
+ }
+# define AO_HAVE_double_store
+#endif
+
+#if defined(AO_HAVE_double_compare_and_swap_release) \
+ && !defined(AO_HAVE_double_store_release)
+ AO_INLINE void
+ AO_double_store_release(volatile AO_double_t *addr, AO_double_t new_val)
+ {
+ AO_double_t old_val;
+
+ do {
+ old_val = *(AO_double_t *)addr;
+ } while (AO_EXPECT_FALSE(!AO_double_compare_and_swap_release(addr, old_val,
+ new_val)));
+ }
+# define AO_HAVE_double_store_release
+#endif
+
+#if defined(AO_HAVE_double_compare_and_swap_full) \
+ && !defined(AO_HAVE_double_store_full)
+ AO_INLINE void
+ AO_double_store_full(volatile AO_double_t *addr, AO_double_t new_val)
+ {
+ AO_double_t old_val;
+
+ do {
+ old_val = *(AO_double_t *)addr;
+ } while (AO_EXPECT_FALSE(!AO_double_compare_and_swap_full(addr, old_val,
+ new_val)));
+ }
+# define AO_HAVE_double_store_full
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* XSIZE_fetch_compare_and_swap */
+/* Acquire ordering: run a full barrier after the primitive so later   */
+/* accesses cannot move before the CAS.                                */
+#if defined(AO_HAVE_XSIZE_fetch_compare_and_swap) \
+    && defined(AO_HAVE_nop_full) \
+    && !defined(AO_HAVE_XSIZE_fetch_compare_and_swap_acquire)
+  AO_INLINE XCTYPE
+  AO_XSIZE_fetch_compare_and_swap_acquire(volatile XCTYPE *addr,
+                                          XCTYPE old_val, XCTYPE new_val)
+  {
+    XCTYPE result = AO_XSIZE_fetch_compare_and_swap(addr, old_val, new_val);
+    AO_nop_full();
+    return result;
+  }
+# define AO_HAVE_XSIZE_fetch_compare_and_swap_acquire
+#endif
+/* Release ordering: full barrier before the primitive.                */
+#if defined(AO_HAVE_XSIZE_fetch_compare_and_swap) \
+    && defined(AO_HAVE_nop_full) \
+    && !defined(AO_HAVE_XSIZE_fetch_compare_and_swap_release)
+# define AO_XSIZE_fetch_compare_and_swap_release(addr, old_val, new_val) \
+                (AO_nop_full(), \
+                 AO_XSIZE_fetch_compare_and_swap(addr, old_val, new_val))
+# define AO_HAVE_XSIZE_fetch_compare_and_swap_release
+#endif
+/* The _full variant is the strongest, so it can stand in for any      */
+/* weaker variant that is still missing.                               */
+#if defined(AO_HAVE_XSIZE_fetch_compare_and_swap_full)
+# if !defined(AO_HAVE_XSIZE_fetch_compare_and_swap_release)
+#   define AO_XSIZE_fetch_compare_and_swap_release(addr, old_val, new_val) \
+                AO_XSIZE_fetch_compare_and_swap_full(addr, old_val, new_val)
+#   define AO_HAVE_XSIZE_fetch_compare_and_swap_release
+# endif
+# if !defined(AO_HAVE_XSIZE_fetch_compare_and_swap_acquire)
+#   define AO_XSIZE_fetch_compare_and_swap_acquire(addr, old_val, new_val) \
+                AO_XSIZE_fetch_compare_and_swap_full(addr, old_val, new_val)
+#   define AO_HAVE_XSIZE_fetch_compare_and_swap_acquire
+# endif
+# if !defined(AO_HAVE_XSIZE_fetch_compare_and_swap_write)
+#   define AO_XSIZE_fetch_compare_and_swap_write(addr, old_val, new_val) \
+                AO_XSIZE_fetch_compare_and_swap_full(addr, old_val, new_val)
+#   define AO_HAVE_XSIZE_fetch_compare_and_swap_write
+# endif
+# if !defined(AO_HAVE_XSIZE_fetch_compare_and_swap_read)
+#   define AO_XSIZE_fetch_compare_and_swap_read(addr, old_val, new_val) \
+                AO_XSIZE_fetch_compare_and_swap_full(addr, old_val, new_val)
+#   define AO_HAVE_XSIZE_fetch_compare_and_swap_read
+# endif
+#endif /* AO_HAVE_XSIZE_fetch_compare_and_swap_full */
+
+/* Any ordered variant is a correct implementation of the unordered    */
+/* primitive (the extra barrier is merely redundant).                  */
+#if !defined(AO_HAVE_XSIZE_fetch_compare_and_swap) \
+    && defined(AO_HAVE_XSIZE_fetch_compare_and_swap_release)
+# define AO_XSIZE_fetch_compare_and_swap(addr, old_val, new_val) \
+                AO_XSIZE_fetch_compare_and_swap_release(addr, old_val, new_val)
+# define AO_HAVE_XSIZE_fetch_compare_and_swap
+#endif
+#if !defined(AO_HAVE_XSIZE_fetch_compare_and_swap) \
+    && defined(AO_HAVE_XSIZE_fetch_compare_and_swap_acquire)
+# define AO_XSIZE_fetch_compare_and_swap(addr, old_val, new_val) \
+                AO_XSIZE_fetch_compare_and_swap_acquire(addr, old_val, new_val)
+# define AO_HAVE_XSIZE_fetch_compare_and_swap
+#endif
+#if !defined(AO_HAVE_XSIZE_fetch_compare_and_swap) \
+    && defined(AO_HAVE_XSIZE_fetch_compare_and_swap_write)
+# define AO_XSIZE_fetch_compare_and_swap(addr, old_val, new_val) \
+                AO_XSIZE_fetch_compare_and_swap_write(addr, old_val, new_val)
+# define AO_HAVE_XSIZE_fetch_compare_and_swap
+#endif
+#if !defined(AO_HAVE_XSIZE_fetch_compare_and_swap) \
+    && defined(AO_HAVE_XSIZE_fetch_compare_and_swap_read)
+# define AO_XSIZE_fetch_compare_and_swap(addr, old_val, new_val) \
+                AO_XSIZE_fetch_compare_and_swap_read(addr, old_val, new_val)
+# define AO_HAVE_XSIZE_fetch_compare_and_swap
+#endif
+
+/* Build _full from _acquire by prepending a full barrier.             */
+#if defined(AO_HAVE_XSIZE_fetch_compare_and_swap_acquire) \
+    && defined(AO_HAVE_nop_full) \
+    && !defined(AO_HAVE_XSIZE_fetch_compare_and_swap_full)
+# define AO_XSIZE_fetch_compare_and_swap_full(addr, old_val, new_val) \
+                (AO_nop_full(), \
+                 AO_XSIZE_fetch_compare_and_swap_acquire(addr, old_val, new_val))
+# define AO_HAVE_XSIZE_fetch_compare_and_swap_full
+#endif
+
+/* _release_write / _acquire_read: prefer the cheaper write/read       */
+/* variant; otherwise fall back to release/acquire.                    */
+#if !defined(AO_HAVE_XSIZE_fetch_compare_and_swap_release_write) \
+    && defined(AO_HAVE_XSIZE_fetch_compare_and_swap_write)
+# define AO_XSIZE_fetch_compare_and_swap_release_write(addr,old_val,new_val) \
+                AO_XSIZE_fetch_compare_and_swap_write(addr, old_val, new_val)
+# define AO_HAVE_XSIZE_fetch_compare_and_swap_release_write
+#endif
+#if !defined(AO_HAVE_XSIZE_fetch_compare_and_swap_release_write) \
+    && defined(AO_HAVE_XSIZE_fetch_compare_and_swap_release)
+# define AO_XSIZE_fetch_compare_and_swap_release_write(addr,old_val,new_val) \
+                AO_XSIZE_fetch_compare_and_swap_release(addr, old_val, new_val)
+# define AO_HAVE_XSIZE_fetch_compare_and_swap_release_write
+#endif
+#if !defined(AO_HAVE_XSIZE_fetch_compare_and_swap_acquire_read) \
+    && defined(AO_HAVE_XSIZE_fetch_compare_and_swap_read)
+# define AO_XSIZE_fetch_compare_and_swap_acquire_read(addr,old_val,new_val) \
+                AO_XSIZE_fetch_compare_and_swap_read(addr, old_val, new_val)
+# define AO_HAVE_XSIZE_fetch_compare_and_swap_acquire_read
+#endif
+#if !defined(AO_HAVE_XSIZE_fetch_compare_and_swap_acquire_read) \
+    && defined(AO_HAVE_XSIZE_fetch_compare_and_swap_acquire)
+# define AO_XSIZE_fetch_compare_and_swap_acquire_read(addr,old_val,new_val) \
+                AO_XSIZE_fetch_compare_and_swap_acquire(addr, old_val, new_val)
+# define AO_HAVE_XSIZE_fetch_compare_and_swap_acquire_read
+#endif
+
+/* _dd_acquire_read: no barrier for data-dependent reads unless the    */
+/* platform defines AO_NO_DD_ORDERING (see the file header).           */
+#ifdef AO_NO_DD_ORDERING
+# if defined(AO_HAVE_XSIZE_fetch_compare_and_swap_acquire_read)
+#   define AO_XSIZE_fetch_compare_and_swap_dd_acquire_read(addr,old_val,new_val) \
+                AO_XSIZE_fetch_compare_and_swap_acquire_read(addr, old_val, new_val)
+#   define AO_HAVE_XSIZE_fetch_compare_and_swap_dd_acquire_read
+# endif
+#else
+# if defined(AO_HAVE_XSIZE_fetch_compare_and_swap)
+#   define AO_XSIZE_fetch_compare_and_swap_dd_acquire_read(addr,old_val,new_val) \
+                AO_XSIZE_fetch_compare_and_swap(addr, old_val, new_val)
+#   define AO_HAVE_XSIZE_fetch_compare_and_swap_dd_acquire_read
+# endif
+#endif /* !AO_NO_DD_ORDERING */
+
+/* XSIZE_compare_and_swap */
+/* Acquire ordering: full barrier after the primitive.                 */
+#if defined(AO_HAVE_XSIZE_compare_and_swap) && defined(AO_HAVE_nop_full) \
+    && !defined(AO_HAVE_XSIZE_compare_and_swap_acquire)
+  AO_INLINE int
+  AO_XSIZE_compare_and_swap_acquire(volatile XCTYPE *addr, XCTYPE old,
+                                    XCTYPE new_val)
+  {
+    int result = AO_XSIZE_compare_and_swap(addr, old, new_val);
+    AO_nop_full();
+    return result;
+  }
+# define AO_HAVE_XSIZE_compare_and_swap_acquire
+#endif
+/* Release ordering: full barrier before the primitive.                */
+#if defined(AO_HAVE_XSIZE_compare_and_swap) && defined(AO_HAVE_nop_full) \
+    && !defined(AO_HAVE_XSIZE_compare_and_swap_release)
+# define AO_XSIZE_compare_and_swap_release(addr, old, new_val) \
+                (AO_nop_full(), AO_XSIZE_compare_and_swap(addr, old, new_val))
+# define AO_HAVE_XSIZE_compare_and_swap_release
+#endif
+/* The _full variant can stand in for any missing weaker variant.      */
+#if defined(AO_HAVE_XSIZE_compare_and_swap_full)
+# if !defined(AO_HAVE_XSIZE_compare_and_swap_release)
+#   define AO_XSIZE_compare_and_swap_release(addr, old, new_val) \
+                AO_XSIZE_compare_and_swap_full(addr, old, new_val)
+#   define AO_HAVE_XSIZE_compare_and_swap_release
+# endif
+# if !defined(AO_HAVE_XSIZE_compare_and_swap_acquire)
+#   define AO_XSIZE_compare_and_swap_acquire(addr, old, new_val) \
+                AO_XSIZE_compare_and_swap_full(addr, old, new_val)
+#   define AO_HAVE_XSIZE_compare_and_swap_acquire
+# endif
+# if !defined(AO_HAVE_XSIZE_compare_and_swap_write)
+#   define AO_XSIZE_compare_and_swap_write(addr, old, new_val) \
+                AO_XSIZE_compare_and_swap_full(addr, old, new_val)
+#   define AO_HAVE_XSIZE_compare_and_swap_write
+# endif
+# if !defined(AO_HAVE_XSIZE_compare_and_swap_read)
+#   define AO_XSIZE_compare_and_swap_read(addr, old, new_val) \
+                AO_XSIZE_compare_and_swap_full(addr, old, new_val)
+#   define AO_HAVE_XSIZE_compare_and_swap_read
+# endif
+#endif /* AO_HAVE_XSIZE_compare_and_swap_full */
+
+/* Any ordered variant is a correct implementation of the unordered    */
+/* primitive.                                                          */
+#if !defined(AO_HAVE_XSIZE_compare_and_swap) \
+    && defined(AO_HAVE_XSIZE_compare_and_swap_release)
+# define AO_XSIZE_compare_and_swap(addr, old, new_val) \
+                AO_XSIZE_compare_and_swap_release(addr, old, new_val)
+# define AO_HAVE_XSIZE_compare_and_swap
+#endif
+#if !defined(AO_HAVE_XSIZE_compare_and_swap) \
+    && defined(AO_HAVE_XSIZE_compare_and_swap_acquire)
+# define AO_XSIZE_compare_and_swap(addr, old, new_val) \
+                AO_XSIZE_compare_and_swap_acquire(addr, old, new_val)
+# define AO_HAVE_XSIZE_compare_and_swap
+#endif
+#if !defined(AO_HAVE_XSIZE_compare_and_swap) \
+    && defined(AO_HAVE_XSIZE_compare_and_swap_write)
+# define AO_XSIZE_compare_and_swap(addr, old, new_val) \
+                AO_XSIZE_compare_and_swap_write(addr, old, new_val)
+# define AO_HAVE_XSIZE_compare_and_swap
+#endif
+#if !defined(AO_HAVE_XSIZE_compare_and_swap) \
+    && defined(AO_HAVE_XSIZE_compare_and_swap_read)
+# define AO_XSIZE_compare_and_swap(addr, old, new_val) \
+                AO_XSIZE_compare_and_swap_read(addr, old, new_val)
+# define AO_HAVE_XSIZE_compare_and_swap
+#endif
+
+/* Build _full from _acquire by prepending a full barrier.             */
+#if defined(AO_HAVE_XSIZE_compare_and_swap_acquire) \
+    && defined(AO_HAVE_nop_full) \
+    && !defined(AO_HAVE_XSIZE_compare_and_swap_full)
+# define AO_XSIZE_compare_and_swap_full(addr, old, new_val) \
+                (AO_nop_full(), \
+                 AO_XSIZE_compare_and_swap_acquire(addr, old, new_val))
+# define AO_HAVE_XSIZE_compare_and_swap_full
+#endif
+
+/* _release_write / _acquire_read: prefer the cheaper write/read       */
+/* variant; otherwise fall back to release/acquire.                    */
+#if !defined(AO_HAVE_XSIZE_compare_and_swap_release_write) \
+    && defined(AO_HAVE_XSIZE_compare_and_swap_write)
+# define AO_XSIZE_compare_and_swap_release_write(addr, old, new_val) \
+                AO_XSIZE_compare_and_swap_write(addr, old, new_val)
+# define AO_HAVE_XSIZE_compare_and_swap_release_write
+#endif
+#if !defined(AO_HAVE_XSIZE_compare_and_swap_release_write) \
+    && defined(AO_HAVE_XSIZE_compare_and_swap_release)
+# define AO_XSIZE_compare_and_swap_release_write(addr, old, new_val) \
+                AO_XSIZE_compare_and_swap_release(addr, old, new_val)
+# define AO_HAVE_XSIZE_compare_and_swap_release_write
+#endif
+#if !defined(AO_HAVE_XSIZE_compare_and_swap_acquire_read) \
+    && defined(AO_HAVE_XSIZE_compare_and_swap_read)
+# define AO_XSIZE_compare_and_swap_acquire_read(addr, old, new_val) \
+                AO_XSIZE_compare_and_swap_read(addr, old, new_val)
+# define AO_HAVE_XSIZE_compare_and_swap_acquire_read
+#endif
+#if !defined(AO_HAVE_XSIZE_compare_and_swap_acquire_read) \
+    && defined(AO_HAVE_XSIZE_compare_and_swap_acquire)
+# define AO_XSIZE_compare_and_swap_acquire_read(addr, old, new_val) \
+                AO_XSIZE_compare_and_swap_acquire(addr, old, new_val)
+# define AO_HAVE_XSIZE_compare_and_swap_acquire_read
+#endif
+
+/* _dd_acquire_read: no barrier for data-dependent reads unless the    */
+/* platform defines AO_NO_DD_ORDERING.                                 */
+#ifdef AO_NO_DD_ORDERING
+# if defined(AO_HAVE_XSIZE_compare_and_swap_acquire_read)
+#   define AO_XSIZE_compare_and_swap_dd_acquire_read(addr, old, new_val) \
+                AO_XSIZE_compare_and_swap_acquire_read(addr, old, new_val)
+#   define AO_HAVE_XSIZE_compare_and_swap_dd_acquire_read
+# endif
+#else
+# if defined(AO_HAVE_XSIZE_compare_and_swap)
+#   define AO_XSIZE_compare_and_swap_dd_acquire_read(addr, old, new_val) \
+                AO_XSIZE_compare_and_swap(addr, old, new_val)
+#   define AO_HAVE_XSIZE_compare_and_swap_dd_acquire_read
+# endif
+#endif /* !AO_NO_DD_ORDERING */
+
+/* XSIZE_load */
+/* The _full variant also serves as _acquire and _read.                */
+#if defined(AO_HAVE_XSIZE_load_full) && !defined(AO_HAVE_XSIZE_load_acquire)
+# define AO_XSIZE_load_acquire(addr) AO_XSIZE_load_full(addr)
+# define AO_HAVE_XSIZE_load_acquire
+#endif
+
+#if defined(AO_HAVE_XSIZE_load_acquire) && !defined(AO_HAVE_XSIZE_load)
+# define AO_XSIZE_load(addr) AO_XSIZE_load_acquire(addr)
+# define AO_HAVE_XSIZE_load
+#endif
+
+#if defined(AO_HAVE_XSIZE_load_full) && !defined(AO_HAVE_XSIZE_load_read)
+# define AO_XSIZE_load_read(addr) AO_XSIZE_load_full(addr)
+# define AO_HAVE_XSIZE_load_read
+#endif
+
+/* An acquire load satisfies acquire_read.                             */
+#if !defined(AO_HAVE_XSIZE_load_acquire_read) \
+    && defined(AO_HAVE_XSIZE_load_acquire)
+# define AO_XSIZE_load_acquire_read(addr) AO_XSIZE_load_acquire(addr)
+# define AO_HAVE_XSIZE_load_acquire_read
+#endif
+
+#if defined(AO_HAVE_XSIZE_load) && defined(AO_HAVE_nop_full) \
+    && !defined(AO_HAVE_XSIZE_load_acquire)
+  AO_INLINE XCTYPE
+  AO_XSIZE_load_acquire(const volatile XCTYPE *addr)
+  {
+    XCTYPE result = AO_XSIZE_load(addr);
+
+    /* Acquire barrier would be useless, since the load could be delayed */
+    /* beyond it.                                                        */
+    AO_nop_full();
+    return result;
+  }
+# define AO_HAVE_XSIZE_load_acquire
+#endif
+
+/* Read-ordered load: plain load followed by a read barrier.           */
+#if defined(AO_HAVE_XSIZE_load) && defined(AO_HAVE_nop_read) \
+    && !defined(AO_HAVE_XSIZE_load_read)
+  AO_INLINE XCTYPE
+  AO_XSIZE_load_read(const volatile XCTYPE *addr)
+  {
+    XCTYPE result = AO_XSIZE_load(addr);
+
+    AO_nop_read();
+    return result;
+  }
+# define AO_HAVE_XSIZE_load_read
+#endif
+
+/* _full load: full barrier before an acquire load.                    */
+#if defined(AO_HAVE_XSIZE_load_acquire) && defined(AO_HAVE_nop_full) \
+    && !defined(AO_HAVE_XSIZE_load_full)
+# define AO_XSIZE_load_full(addr) (AO_nop_full(), AO_XSIZE_load_acquire(addr))
+# define AO_HAVE_XSIZE_load_full
+#endif
+
+/* Emulate an ordered load with CAS(addr, v, v): the CAS succeeds only */
+/* when the location still holds v, and writing v back is value-       */
+/* preserving; retry until the value is confirmed.                     */
+#if defined(AO_HAVE_XSIZE_compare_and_swap_read) \
+    && !defined(AO_HAVE_XSIZE_load_read)
+# define AO_XSIZE_CAS_BASED_LOAD_READ
+  AO_INLINE XCTYPE
+  AO_XSIZE_load_read(const volatile XCTYPE *addr)
+  {
+    XCTYPE result;
+
+    do {
+      result = *(const XCTYPE *)addr;
+    } while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap_read(
+                                                (volatile XCTYPE *)addr,
+                                                result, result)));
+    return result;
+  }
+# define AO_HAVE_XSIZE_load_read
+#endif
+
+#if !defined(AO_HAVE_XSIZE_load_acquire_read) \
+    && defined(AO_HAVE_XSIZE_load_read)
+# define AO_XSIZE_load_acquire_read(addr) AO_XSIZE_load_read(addr)
+# define AO_HAVE_XSIZE_load_acquire_read
+#endif
+
+/* Use acquire_read for the plain load, except when load_read was the  */
+/* CAS-based emulation above and a plain CAS exists (the cheaper       */
+/* CAS-based plain load below is then preferred).                      */
+#if defined(AO_HAVE_XSIZE_load_acquire_read) && !defined(AO_HAVE_XSIZE_load) \
+    && (!defined(AO_XSIZE_CAS_BASED_LOAD_READ) \
+        || !defined(AO_HAVE_XSIZE_compare_and_swap))
+# define AO_XSIZE_load(addr) AO_XSIZE_load_acquire_read(addr)
+# define AO_HAVE_XSIZE_load
+#endif
+
+#if defined(AO_HAVE_XSIZE_compare_and_swap_full) \
+    && !defined(AO_HAVE_XSIZE_load_full)
+  AO_INLINE XCTYPE
+  AO_XSIZE_load_full(const volatile XCTYPE *addr)
+  {
+    XCTYPE result;
+
+    do {
+      result = *(const XCTYPE *)addr;
+    } while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap_full(
+                                                (volatile XCTYPE *)addr,
+                                                result, result)));
+    return result;
+  }
+# define AO_HAVE_XSIZE_load_full
+#endif
+
+#if defined(AO_HAVE_XSIZE_compare_and_swap_acquire) \
+    && !defined(AO_HAVE_XSIZE_load_acquire)
+  AO_INLINE XCTYPE
+  AO_XSIZE_load_acquire(const volatile XCTYPE *addr)
+  {
+    XCTYPE result;
+
+    do {
+      result = *(const XCTYPE *)addr;
+    } while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap_acquire(
+                                                (volatile XCTYPE *)addr,
+                                                result, result)));
+    return result;
+  }
+# define AO_HAVE_XSIZE_load_acquire
+#endif
+
+#if defined(AO_HAVE_XSIZE_compare_and_swap) && !defined(AO_HAVE_XSIZE_load)
+  AO_INLINE XCTYPE
+  AO_XSIZE_load(const volatile XCTYPE *addr)
+  {
+    XCTYPE result;
+
+    do {
+      result = *(const XCTYPE *)addr;
+    } while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap(
+                                                (volatile XCTYPE *)addr,
+                                                result, result)));
+    return result;
+  }
+# define AO_HAVE_XSIZE_load
+#endif
+
+/* _dd_acquire_read: no barrier for data-dependent reads unless the    */
+/* platform defines AO_NO_DD_ORDERING.                                 */
+#ifdef AO_NO_DD_ORDERING
+# if defined(AO_HAVE_XSIZE_load_acquire_read)
+#   define AO_XSIZE_load_dd_acquire_read(addr) \
+                                AO_XSIZE_load_acquire_read(addr)
+#   define AO_HAVE_XSIZE_load_dd_acquire_read
+# endif
+#else
+# if defined(AO_HAVE_XSIZE_load)
+#   define AO_XSIZE_load_dd_acquire_read(addr) AO_XSIZE_load(addr)
+#   define AO_HAVE_XSIZE_load_dd_acquire_read
+# endif
+#endif /* !AO_NO_DD_ORDERING */
+
+/* XSIZE_store */
+/* The _full variant also serves as _release and _write.               */
+#if defined(AO_HAVE_XSIZE_store_full) && !defined(AO_HAVE_XSIZE_store_release)
+# define AO_XSIZE_store_release(addr, val) AO_XSIZE_store_full(addr, val)
+# define AO_HAVE_XSIZE_store_release
+#endif
+
+#if defined(AO_HAVE_XSIZE_store_release) && !defined(AO_HAVE_XSIZE_store)
+# define AO_XSIZE_store(addr, val) AO_XSIZE_store_release(addr, val)
+# define AO_HAVE_XSIZE_store
+#endif
+
+#if defined(AO_HAVE_XSIZE_store_full) && !defined(AO_HAVE_XSIZE_store_write)
+# define AO_XSIZE_store_write(addr, val) AO_XSIZE_store_full(addr, val)
+# define AO_HAVE_XSIZE_store_write
+#endif
+
+/* A release store satisfies release_write.                            */
+#if defined(AO_HAVE_XSIZE_store_release) \
+    && !defined(AO_HAVE_XSIZE_store_release_write)
+# define AO_XSIZE_store_release_write(addr, val) \
+                                AO_XSIZE_store_release(addr, val)
+# define AO_HAVE_XSIZE_store_release_write
+#endif
+
+#if defined(AO_HAVE_XSIZE_store_write) && !defined(AO_HAVE_XSIZE_store)
+# define AO_XSIZE_store(addr, val) AO_XSIZE_store_write(addr, val)
+# define AO_HAVE_XSIZE_store
+#endif
+
+/* Release store: a full barrier before the plain store.               */
+#if defined(AO_HAVE_XSIZE_store) && defined(AO_HAVE_nop_full) \
+    && !defined(AO_HAVE_XSIZE_store_release)
+# define AO_XSIZE_store_release(addr, val) \
+                                (AO_nop_full(), AO_XSIZE_store(addr, val))
+# define AO_HAVE_XSIZE_store_release
+#endif
+
+/* Write-ordered store: a write barrier before the plain store.        */
+#if defined(AO_HAVE_XSIZE_store) && defined(AO_HAVE_nop_write) \
+    && !defined(AO_HAVE_XSIZE_store_write)
+# define AO_XSIZE_store_write(addr, val) \
+                                (AO_nop_write(), AO_XSIZE_store(addr, val))
+# define AO_HAVE_XSIZE_store_write
+#endif
+
+/* CAS-loop store emulations: read the current value (volatile cast    */
+/* away; the CAS revalidates it, so a stale read just retries) and     */
+/* CAS in new_val until it sticks.                                     */
+#if defined(AO_HAVE_XSIZE_compare_and_swap_write) \
+    && !defined(AO_HAVE_XSIZE_store_write)
+  AO_INLINE void
+  AO_XSIZE_store_write(volatile XCTYPE *addr, XCTYPE new_val)
+  {
+    XCTYPE old_val;
+
+    do {
+      old_val = *(XCTYPE *)addr;
+    } while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap_write(addr, old_val,
+                                                              new_val)));
+  }
+# define AO_HAVE_XSIZE_store_write
+#endif
+
+/* A write-ordered store satisfies release_write.                      */
+#if defined(AO_HAVE_XSIZE_store_write) \
+    && !defined(AO_HAVE_XSIZE_store_release_write)
+# define AO_XSIZE_store_release_write(addr, val) \
+                                AO_XSIZE_store_write(addr, val)
+# define AO_HAVE_XSIZE_store_release_write
+#endif
+
+/* Full-barrier store: release store plus a trailing full barrier.     */
+#if defined(AO_HAVE_XSIZE_store_release) && defined(AO_HAVE_nop_full) \
+    && !defined(AO_HAVE_XSIZE_store_full)
+# define AO_XSIZE_store_full(addr, val) \
+                                (AO_XSIZE_store_release(addr, val), \
+                                 AO_nop_full())
+# define AO_HAVE_XSIZE_store_full
+#endif
+
+#if defined(AO_HAVE_XSIZE_compare_and_swap) && !defined(AO_HAVE_XSIZE_store)
+  AO_INLINE void
+  AO_XSIZE_store(volatile XCTYPE *addr, XCTYPE new_val)
+  {
+    XCTYPE old_val;
+
+    do {
+      old_val = *(XCTYPE *)addr;
+    } while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap(addr,
+                                                        old_val, new_val)));
+  }
+# define AO_HAVE_XSIZE_store
+#endif
+
+#if defined(AO_HAVE_XSIZE_compare_and_swap_release) \
+    && !defined(AO_HAVE_XSIZE_store_release)
+  AO_INLINE void
+  AO_XSIZE_store_release(volatile XCTYPE *addr, XCTYPE new_val)
+  {
+    XCTYPE old_val;
+
+    do {
+      old_val = *(XCTYPE *)addr;
+    } while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap_release(addr, old_val,
+                                                                new_val)));
+  }
+# define AO_HAVE_XSIZE_store_release
+#endif
+
+#if defined(AO_HAVE_XSIZE_compare_and_swap_full) \
+    && !defined(AO_HAVE_XSIZE_store_full)
+  AO_INLINE void
+  AO_XSIZE_store_full(volatile XCTYPE *addr, XCTYPE new_val)
+  {
+    XCTYPE old_val;
+
+    do {
+      old_val = *(XCTYPE *)addr;
+    } while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap_full(addr, old_val,
+                                                             new_val)));
+  }
+# define AO_HAVE_XSIZE_store_full
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * Generalize atomic operations for atomic_ops.h.
+ * Should not be included directly.
+ *
+ * We make no attempt to define useless operations, such as
+ * AO_nop_acquire
+ * AO_nop_release
+ *
+ * We have also so far neglected to define some others, which
+ * do not appear likely to be useful, e.g. stores with acquire
+ * or read barriers.
+ *
+ * This file is sometimes included twice by atomic_ops.h.
+ * All definitions include explicit checks that we are not replacing
+ * an earlier definition. In general, more desirable expansions
+ * appear earlier so that we are more likely to use them.
+ *
+ * We only make safe generalizations, except that by default we define
+ * the ...dd_acquire_read operations to be equivalent to those without
+ * a barrier. On platforms for which this is unsafe, the platform-specific
+ * file must define AO_NO_DD_ORDERING.
+ */
+
+/* Enforce that this header is only reached through atomic_ops.h.      */
+#ifndef AO_ATOMIC_OPS_H
+# error This file should not be included directly.
+#endif
+
+/* Generate test_and_set_full, if necessary and possible. */
+/* Only entered when no test_and_set variant at all was provided by    */
+/* the platform-specific headers.                                      */
+#if !defined(AO_HAVE_test_and_set) && !defined(AO_HAVE_test_and_set_release) \
+    && !defined(AO_HAVE_test_and_set_acquire) \
+    && !defined(AO_HAVE_test_and_set_read) \
+    && !defined(AO_HAVE_test_and_set_full)
+
+  /* Emulate AO_compare_and_swap() via AO_fetch_compare_and_swap(). */
+  /* Success iff the fetched value equals the expected one.          */
+# if defined(AO_HAVE_fetch_compare_and_swap) \
+     && !defined(AO_HAVE_compare_and_swap)
+    AO_INLINE int
+    AO_compare_and_swap(volatile AO_t *addr, AO_t old_val, AO_t new_val)
+    {
+      return AO_fetch_compare_and_swap(addr, old_val, new_val) == old_val;
+    }
+#   define AO_HAVE_compare_and_swap
+# endif
+
+# if defined(AO_HAVE_fetch_compare_and_swap_full) \
+     && !defined(AO_HAVE_compare_and_swap_full)
+    AO_INLINE int
+    AO_compare_and_swap_full(volatile AO_t *addr, AO_t old_val, AO_t new_val)
+    {
+      return AO_fetch_compare_and_swap_full(addr, old_val, new_val)
+               == old_val;
+    }
+#   define AO_HAVE_compare_and_swap_full
+# endif
+
+# if defined(AO_HAVE_fetch_compare_and_swap_acquire) \
+     && !defined(AO_HAVE_compare_and_swap_acquire)
+    AO_INLINE int
+    AO_compare_and_swap_acquire(volatile AO_t *addr, AO_t old_val,
+                                AO_t new_val)
+    {
+      return AO_fetch_compare_and_swap_acquire(addr, old_val, new_val)
+               == old_val;
+    }
+#   define AO_HAVE_compare_and_swap_acquire
+# endif
+
+# if defined(AO_HAVE_fetch_compare_and_swap_release) \
+     && !defined(AO_HAVE_compare_and_swap_release)
+    AO_INLINE int
+    AO_compare_and_swap_release(volatile AO_t *addr, AO_t old_val,
+                                AO_t new_val)
+    {
+      return AO_fetch_compare_and_swap_release(addr, old_val, new_val)
+               == old_val;
+    }
+#   define AO_HAVE_compare_and_swap_release
+# endif
+
+  /* Select the CAS family matching the AO_TS_t representation:        */
+  /* char-sized (AO_CHAR_TS_T) or AO_t-sized (AO_AO_TS_T).             */
+# if AO_CHAR_TS_T
+#   define AO_TS_COMPARE_AND_SWAP_FULL(a,o,n) \
+                                AO_char_compare_and_swap_full(a,o,n)
+#   define AO_TS_COMPARE_AND_SWAP_ACQUIRE(a,o,n) \
+                                AO_char_compare_and_swap_acquire(a,o,n)
+#   define AO_TS_COMPARE_AND_SWAP_RELEASE(a,o,n) \
+                                AO_char_compare_and_swap_release(a,o,n)
+#   define AO_TS_COMPARE_AND_SWAP(a,o,n) AO_char_compare_and_swap(a,o,n)
+# endif
+
+# if AO_AO_TS_T
+#   define AO_TS_COMPARE_AND_SWAP_FULL(a,o,n) AO_compare_and_swap_full(a,o,n)
+#   define AO_TS_COMPARE_AND_SWAP_ACQUIRE(a,o,n) \
+                                AO_compare_and_swap_acquire(a,o,n)
+#   define AO_TS_COMPARE_AND_SWAP_RELEASE(a,o,n) \
+                                AO_compare_and_swap_release(a,o,n)
+#   define AO_TS_COMPARE_AND_SWAP(a,o,n) AO_compare_and_swap(a,o,n)
+# endif
+
+  /* Build test_and_set from CAS: a CAS from CLEAR to SET succeeds     */
+  /* exactly when the flag was clear, and the prior value is returned. */
+# if (AO_AO_TS_T && defined(AO_HAVE_compare_and_swap_full)) \
+     || (AO_CHAR_TS_T && defined(AO_HAVE_char_compare_and_swap_full))
+    AO_INLINE AO_TS_VAL_t
+    AO_test_and_set_full(volatile AO_TS_t *addr)
+    {
+      if (AO_TS_COMPARE_AND_SWAP_FULL(addr, AO_TS_CLEAR, AO_TS_SET))
+        return AO_TS_CLEAR;
+      else
+        return AO_TS_SET;
+    }
+#   define AO_HAVE_test_and_set_full
+# endif /* AO_HAVE_compare_and_swap_full */
+
+# if (AO_AO_TS_T && defined(AO_HAVE_compare_and_swap_acquire)) \
+     || (AO_CHAR_TS_T && defined(AO_HAVE_char_compare_and_swap_acquire))
+    AO_INLINE AO_TS_VAL_t
+    AO_test_and_set_acquire(volatile AO_TS_t *addr)
+    {
+      if (AO_TS_COMPARE_AND_SWAP_ACQUIRE(addr, AO_TS_CLEAR, AO_TS_SET))
+        return AO_TS_CLEAR;
+      else
+        return AO_TS_SET;
+    }
+#   define AO_HAVE_test_and_set_acquire
+# endif /* AO_HAVE_compare_and_swap_acquire */
+
+# if (AO_AO_TS_T && defined(AO_HAVE_compare_and_swap_release)) \
+     || (AO_CHAR_TS_T && defined(AO_HAVE_char_compare_and_swap_release))
+    AO_INLINE AO_TS_VAL_t
+    AO_test_and_set_release(volatile AO_TS_t *addr)
+    {
+      if (AO_TS_COMPARE_AND_SWAP_RELEASE(addr, AO_TS_CLEAR, AO_TS_SET))
+        return AO_TS_CLEAR;
+      else
+        return AO_TS_SET;
+    }
+#   define AO_HAVE_test_and_set_release
+# endif /* AO_HAVE_compare_and_swap_release */
+
+# if (AO_AO_TS_T && defined(AO_HAVE_compare_and_swap)) \
+     || (AO_CHAR_TS_T && defined(AO_HAVE_char_compare_and_swap))
+    AO_INLINE AO_TS_VAL_t
+    AO_test_and_set(volatile AO_TS_t *addr)
+    {
+      if (AO_TS_COMPARE_AND_SWAP(addr, AO_TS_CLEAR, AO_TS_SET))
+        return AO_TS_CLEAR;
+      else
+        return AO_TS_SET;
+    }
+#   define AO_HAVE_test_and_set
+# endif /* AO_HAVE_compare_and_swap */
+#endif /* No prior test and set */
+
+/* Nop */
+/* Unordered no-op: always available.                                  */
+#if !defined(AO_HAVE_nop)
+  AO_INLINE void AO_nop(void) {}
+# define AO_HAVE_nop
+#endif
+
+/* Full-barrier fallback: a full-barrier test-and-set on a dummy       */
+/* location provides the ordering as a side effect.                    */
+#if defined(AO_HAVE_test_and_set_full) && !defined(AO_HAVE_nop_full)
+  AO_INLINE void
+  AO_nop_full(void)
+  {
+    AO_TS_t dummy = AO_TS_INITIALIZER;
+    AO_test_and_set_full(&dummy);
+  }
+# define AO_HAVE_nop_full
+#endif
+
+/* Acquire/release fences make no sense detached from an operation     */
+/* (see the file header); reject platform headers that define them.    */
+#if defined(AO_HAVE_nop_acquire)
+# error AO_nop_acquire is useless: dont define.
+#endif
+#if defined(AO_HAVE_nop_release)
+# error AO_nop_release is useless: dont define.
+#endif
+
+/* A full barrier also provides read-only and write-only ordering.     */
+#if defined(AO_HAVE_nop_full) && !defined(AO_HAVE_nop_read)
+# define AO_nop_read() AO_nop_full()
+# define AO_HAVE_nop_read
+#endif
+
+#if defined(AO_HAVE_nop_full) && !defined(AO_HAVE_nop_write)
+# define AO_nop_write() AO_nop_full()
+# define AO_HAVE_nop_write
+#endif
+
+/* Test_and_set */
+/* Release ordering: full barrier before the primitive.                */
+#if defined(AO_HAVE_test_and_set) && defined(AO_HAVE_nop_full) \
+    && !defined(AO_HAVE_test_and_set_release)
+# define AO_test_and_set_release(addr) (AO_nop_full(), AO_test_and_set(addr))
+# define AO_HAVE_test_and_set_release
+#endif
+
+/* Acquire ordering: full barrier after the primitive.                 */
+#if defined(AO_HAVE_test_and_set) && defined(AO_HAVE_nop_full) \
+    && !defined(AO_HAVE_test_and_set_acquire)
+  AO_INLINE AO_TS_VAL_t
+  AO_test_and_set_acquire(volatile AO_TS_t *addr)
+  {
+    AO_TS_VAL_t result = AO_test_and_set(addr);
+    AO_nop_full();
+    return result;
+  }
+# define AO_HAVE_test_and_set_acquire
+#endif
+
+/* The _full variant can stand in for any missing weaker variant.      */
+#if defined(AO_HAVE_test_and_set_full)
+# if !defined(AO_HAVE_test_and_set_release)
+#   define AO_test_and_set_release(addr) AO_test_and_set_full(addr)
+#   define AO_HAVE_test_and_set_release
+# endif
+# if !defined(AO_HAVE_test_and_set_acquire)
+#   define AO_test_and_set_acquire(addr) AO_test_and_set_full(addr)
+#   define AO_HAVE_test_and_set_acquire
+# endif
+# if !defined(AO_HAVE_test_and_set_write)
+#   define AO_test_and_set_write(addr) AO_test_and_set_full(addr)
+#   define AO_HAVE_test_and_set_write
+# endif
+# if !defined(AO_HAVE_test_and_set_read)
+#   define AO_test_and_set_read(addr) AO_test_and_set_full(addr)
+#   define AO_HAVE_test_and_set_read
+# endif
+#endif /* AO_HAVE_test_and_set_full */
+
+/* Any ordered variant is a correct implementation of the unordered    */
+/* primitive.                                                          */
+#if !defined(AO_HAVE_test_and_set) && defined(AO_HAVE_test_and_set_release)
+# define AO_test_and_set(addr) AO_test_and_set_release(addr)
+# define AO_HAVE_test_and_set
+#endif
+#if !defined(AO_HAVE_test_and_set) && defined(AO_HAVE_test_and_set_acquire)
+# define AO_test_and_set(addr) AO_test_and_set_acquire(addr)
+# define AO_HAVE_test_and_set
+#endif
+#if !defined(AO_HAVE_test_and_set) && defined(AO_HAVE_test_and_set_write)
+# define AO_test_and_set(addr) AO_test_and_set_write(addr)
+# define AO_HAVE_test_and_set
+#endif
+#if !defined(AO_HAVE_test_and_set) && defined(AO_HAVE_test_and_set_read)
+# define AO_test_and_set(addr) AO_test_and_set_read(addr)
+# define AO_HAVE_test_and_set
+#endif
+
+/* Build _full from _acquire by prepending a full barrier.             */
+#if defined(AO_HAVE_test_and_set_acquire) && defined(AO_HAVE_nop_full) \
+    && !defined(AO_HAVE_test_and_set_full)
+# define AO_test_and_set_full(addr) \
+                                (AO_nop_full(), AO_test_and_set_acquire(addr))
+# define AO_HAVE_test_and_set_full
+#endif
+
+/* _release_write / _acquire_read: prefer the cheaper write/read       */
+/* variant; otherwise fall back to release/acquire.                    */
+#if !defined(AO_HAVE_test_and_set_release_write) \
+    && defined(AO_HAVE_test_and_set_write)
+# define AO_test_and_set_release_write(addr) AO_test_and_set_write(addr)
+# define AO_HAVE_test_and_set_release_write
+#endif
+#if !defined(AO_HAVE_test_and_set_release_write) \
+    && defined(AO_HAVE_test_and_set_release)
+# define AO_test_and_set_release_write(addr) AO_test_and_set_release(addr)
+# define AO_HAVE_test_and_set_release_write
+#endif
+#if !defined(AO_HAVE_test_and_set_acquire_read) \
+    && defined(AO_HAVE_test_and_set_read)
+# define AO_test_and_set_acquire_read(addr) AO_test_and_set_read(addr)
+# define AO_HAVE_test_and_set_acquire_read
+#endif
+#if !defined(AO_HAVE_test_and_set_acquire_read) \
+    && defined(AO_HAVE_test_and_set_acquire)
+# define AO_test_and_set_acquire_read(addr) AO_test_and_set_acquire(addr)
+# define AO_HAVE_test_and_set_acquire_read
+#endif
+
+/* _dd_acquire_read: no barrier for data-dependent reads unless the    */
+/* platform defines AO_NO_DD_ORDERING.                                 */
+#ifdef AO_NO_DD_ORDERING
+# if defined(AO_HAVE_test_and_set_acquire_read)
+#   define AO_test_and_set_dd_acquire_read(addr) \
+                                AO_test_and_set_acquire_read(addr)
+#   define AO_HAVE_test_and_set_dd_acquire_read
+# endif
+#else
+# if defined(AO_HAVE_test_and_set)
+#   define AO_test_and_set_dd_acquire_read(addr) AO_test_and_set(addr)
+#   define AO_HAVE_test_and_set_dd_acquire_read
+# endif
+#endif /* !AO_NO_DD_ORDERING */
+
+#include "generalize-small.h"
+
+#include "generalize-arithm.h"
+
+/* Compare_double_and_swap_double based on double_compare_and_swap. */
+/* Pack the two word halves into AO_double_t values and delegate to    */
+/* the double-width CAS primitive.                                     */
+#ifdef AO_HAVE_DOUBLE_PTR_STORAGE
+# if defined(AO_HAVE_double_compare_and_swap) \
+     && !defined(AO_HAVE_compare_double_and_swap_double)
+   AO_INLINE int
+   AO_compare_double_and_swap_double(volatile AO_double_t *addr,
+                                     AO_t old_val1, AO_t old_val2,
+                                     AO_t new_val1, AO_t new_val2)
+   {
+     AO_double_t old_w;
+     AO_double_t new_w;
+     old_w.AO_val1 = old_val1;
+     old_w.AO_val2 = old_val2;
+     new_w.AO_val1 = new_val1;
+     new_w.AO_val2 = new_val2;
+     return AO_double_compare_and_swap(addr, old_w, new_w);
+   }
+#  define AO_HAVE_compare_double_and_swap_double
+# endif
+# if defined(AO_HAVE_double_compare_and_swap_full) \
+     && !defined(AO_HAVE_compare_double_and_swap_double_full)
+   AO_INLINE int
+   AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
+                                          AO_t old_val1, AO_t old_val2,
+                                          AO_t new_val1, AO_t new_val2)
+   {
+     AO_double_t old_w;
+     AO_double_t new_w;
+     old_w.AO_val1 = old_val1;
+     old_w.AO_val2 = old_val2;
+     new_w.AO_val1 = new_val1;
+     new_w.AO_val2 = new_val2;
+     return AO_double_compare_and_swap_full(addr, old_w, new_w);
+   }
+#  define AO_HAVE_compare_double_and_swap_double_full
+# endif
+#endif /* AO_HAVE_DOUBLE_PTR_STORAGE */
+
+/* Compare_double_and_swap_double */
+/* Acquire ordering: full barrier after the primitive.                 */
+#if defined(AO_HAVE_compare_double_and_swap_double) \
+    && defined(AO_HAVE_nop_full) \
+    && !defined(AO_HAVE_compare_double_and_swap_double_acquire)
+  AO_INLINE int
+  AO_compare_double_and_swap_double_acquire(volatile AO_double_t *addr,
+                                            AO_t o1, AO_t o2,
+                                            AO_t n1, AO_t n2)
+  {
+    int result = AO_compare_double_and_swap_double(addr, o1, o2, n1, n2);
+    AO_nop_full();
+    return result;
+  }
+# define AO_HAVE_compare_double_and_swap_double_acquire
+#endif
+/* Release ordering: full barrier before the primitive.                */
+#if defined(AO_HAVE_compare_double_and_swap_double) \
+    && defined(AO_HAVE_nop_full) \
+    && !defined(AO_HAVE_compare_double_and_swap_double_release)
+# define AO_compare_double_and_swap_double_release(addr,o1,o2,n1,n2) \
+                (AO_nop_full(), AO_compare_double_and_swap_double(addr,o1,o2,n1,n2))
+# define AO_HAVE_compare_double_and_swap_double_release
+#endif
+/* The _full variant can stand in for any missing weaker variant.      */
+#if defined(AO_HAVE_compare_double_and_swap_double_full)
+# if !defined(AO_HAVE_compare_double_and_swap_double_release)
+#   define AO_compare_double_and_swap_double_release(addr,o1,o2,n1,n2) \
+                AO_compare_double_and_swap_double_full(addr,o1,o2,n1,n2)
+#   define AO_HAVE_compare_double_and_swap_double_release
+# endif
+# if !defined(AO_HAVE_compare_double_and_swap_double_acquire)
+#   define AO_compare_double_and_swap_double_acquire(addr,o1,o2,n1,n2) \
+                AO_compare_double_and_swap_double_full(addr,o1,o2,n1,n2)
+#   define AO_HAVE_compare_double_and_swap_double_acquire
+# endif
+# if !defined(AO_HAVE_compare_double_and_swap_double_write)
+#   define AO_compare_double_and_swap_double_write(addr,o1,o2,n1,n2) \
+                AO_compare_double_and_swap_double_full(addr,o1,o2,n1,n2)
+#   define AO_HAVE_compare_double_and_swap_double_write
+# endif
+# if !defined(AO_HAVE_compare_double_and_swap_double_read)
+#   define AO_compare_double_and_swap_double_read(addr,o1,o2,n1,n2) \
+                AO_compare_double_and_swap_double_full(addr,o1,o2,n1,n2)
+#   define AO_HAVE_compare_double_and_swap_double_read
+# endif
+#endif /* AO_HAVE_compare_double_and_swap_double_full */
+
+#if !defined(AO_HAVE_compare_double_and_swap_double) \
+ && defined(AO_HAVE_compare_double_and_swap_double_release)
+# define AO_compare_double_and_swap_double(addr,o1,o2,n1,n2) \
+ AO_compare_double_and_swap_double_release(addr,o1,o2,n1,n2)
+# define AO_HAVE_compare_double_and_swap_double
+#endif
+#if !defined(AO_HAVE_compare_double_and_swap_double) \
+ && defined(AO_HAVE_compare_double_and_swap_double_acquire)
+# define AO_compare_double_and_swap_double(addr,o1,o2,n1,n2) \
+ AO_compare_double_and_swap_double_acquire(addr,o1,o2,n1,n2)
+# define AO_HAVE_compare_double_and_swap_double
+#endif
+#if !defined(AO_HAVE_compare_double_and_swap_double) \
+ && defined(AO_HAVE_compare_double_and_swap_double_write)
+# define AO_compare_double_and_swap_double(addr,o1,o2,n1,n2) \
+ AO_compare_double_and_swap_double_write(addr,o1,o2,n1,n2)
+# define AO_HAVE_compare_double_and_swap_double
+#endif
+#if !defined(AO_HAVE_compare_double_and_swap_double) \
+ && defined(AO_HAVE_compare_double_and_swap_double_read)
+# define AO_compare_double_and_swap_double(addr,o1,o2,n1,n2) \
+ AO_compare_double_and_swap_double_read(addr,o1,o2,n1,n2)
+# define AO_HAVE_compare_double_and_swap_double
+#endif
+
+#if defined(AO_HAVE_compare_double_and_swap_double_acquire) \
+ && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_compare_double_and_swap_double_full)
+# define AO_compare_double_and_swap_double_full(addr,o1,o2,n1,n2) \
+ (AO_nop_full(), \
+ AO_compare_double_and_swap_double_acquire(addr,o1,o2,n1,n2))
+# define AO_HAVE_compare_double_and_swap_double_full
+#endif
+
+#if !defined(AO_HAVE_compare_double_and_swap_double_release_write) \
+ && defined(AO_HAVE_compare_double_and_swap_double_write)
+# define AO_compare_double_and_swap_double_release_write(addr,o1,o2,n1,n2) \
+ AO_compare_double_and_swap_double_write(addr,o1,o2,n1,n2)
+# define AO_HAVE_compare_double_and_swap_double_release_write
+#endif
+#if !defined(AO_HAVE_compare_double_and_swap_double_release_write) \
+ && defined(AO_HAVE_compare_double_and_swap_double_release)
+# define AO_compare_double_and_swap_double_release_write(addr,o1,o2,n1,n2) \
+ AO_compare_double_and_swap_double_release(addr,o1,o2,n1,n2)
+# define AO_HAVE_compare_double_and_swap_double_release_write
+#endif
+#if !defined(AO_HAVE_compare_double_and_swap_double_acquire_read) \
+ && defined(AO_HAVE_compare_double_and_swap_double_read)
+# define AO_compare_double_and_swap_double_acquire_read(addr,o1,o2,n1,n2) \
+ AO_compare_double_and_swap_double_read(addr,o1,o2,n1,n2)
+# define AO_HAVE_compare_double_and_swap_double_acquire_read
+#endif
+#if !defined(AO_HAVE_compare_double_and_swap_double_acquire_read) \
+ && defined(AO_HAVE_compare_double_and_swap_double_acquire)
+# define AO_compare_double_and_swap_double_acquire_read(addr,o1,o2,n1,n2) \
+ AO_compare_double_and_swap_double_acquire(addr,o1,o2,n1,n2)
+# define AO_HAVE_compare_double_and_swap_double_acquire_read
+#endif
+
+#ifdef AO_NO_DD_ORDERING
+# if defined(AO_HAVE_compare_double_and_swap_double_acquire_read)
+# define AO_compare_double_and_swap_double_dd_acquire_read(addr,o1,o2,n1,n2) \
+ AO_compare_double_and_swap_double_acquire_read(addr,o1,o2,n1,n2)
+# define AO_HAVE_compare_double_and_swap_double_dd_acquire_read
+# endif
+#else
+# if defined(AO_HAVE_compare_double_and_swap_double)
+# define AO_compare_double_and_swap_double_dd_acquire_read(addr,o1,o2,n1,n2) \
+ AO_compare_double_and_swap_double(addr,o1,o2,n1,n2)
+# define AO_HAVE_compare_double_and_swap_double_dd_acquire_read
+# endif
+#endif /* !AO_NO_DD_ORDERING */
+
+/* Compare_and_swap_double */
+/* NOTE(review): only one old value (o1) is passed through while both */
+/* new words are; presumably the underlying primitive compares just */
+/* the first word of *addr -- confirm against its definition. */
+#if defined(AO_HAVE_compare_and_swap_double) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_compare_and_swap_double_acquire)
+ AO_INLINE int
+ AO_compare_and_swap_double_acquire(volatile AO_double_t *addr,
+ AO_t o1,
+ AO_t n1, AO_t n2)
+ {
+ int result = AO_compare_and_swap_double(addr, o1, n1, n2);
+ AO_nop_full();
+ return result;
+ }
+# define AO_HAVE_compare_and_swap_double_acquire
+#endif
+/* Release = full barrier before the plain operation. */
+#if defined(AO_HAVE_compare_and_swap_double) \
+ && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_compare_and_swap_double_release)
+# define AO_compare_and_swap_double_release(addr,o1,n1,n2) \
+ (AO_nop_full(), AO_compare_and_swap_double(addr,o1,n1,n2))
+# define AO_HAVE_compare_and_swap_double_release
+#endif
+/* The _full variant can stand in for any weaker missing one. */
+#if defined(AO_HAVE_compare_and_swap_double_full)
+# if !defined(AO_HAVE_compare_and_swap_double_release)
+# define AO_compare_and_swap_double_release(addr,o1,n1,n2) \
+ AO_compare_and_swap_double_full(addr,o1,n1,n2)
+# define AO_HAVE_compare_and_swap_double_release
+# endif
+# if !defined(AO_HAVE_compare_and_swap_double_acquire)
+# define AO_compare_and_swap_double_acquire(addr,o1,n1,n2) \
+ AO_compare_and_swap_double_full(addr,o1,n1,n2)
+# define AO_HAVE_compare_and_swap_double_acquire
+# endif
+# if !defined(AO_HAVE_compare_and_swap_double_write)
+# define AO_compare_and_swap_double_write(addr,o1,n1,n2) \
+ AO_compare_and_swap_double_full(addr,o1,n1,n2)
+# define AO_HAVE_compare_and_swap_double_write
+# endif
+# if !defined(AO_HAVE_compare_and_swap_double_read)
+# define AO_compare_and_swap_double_read(addr,o1,n1,n2) \
+ AO_compare_and_swap_double_full(addr,o1,n1,n2)
+# define AO_HAVE_compare_and_swap_double_read
+# endif
+#endif /* AO_HAVE_compare_and_swap_double_full */
+
+/* Any ordered variant can serve as the unordered default. */
+#if !defined(AO_HAVE_compare_and_swap_double) \
+ && defined(AO_HAVE_compare_and_swap_double_release)
+# define AO_compare_and_swap_double(addr,o1,n1,n2) \
+ AO_compare_and_swap_double_release(addr,o1,n1,n2)
+# define AO_HAVE_compare_and_swap_double
+#endif
+#if !defined(AO_HAVE_compare_and_swap_double) \
+ && defined(AO_HAVE_compare_and_swap_double_acquire)
+# define AO_compare_and_swap_double(addr,o1,n1,n2) \
+ AO_compare_and_swap_double_acquire(addr,o1,n1,n2)
+# define AO_HAVE_compare_and_swap_double
+#endif
+#if !defined(AO_HAVE_compare_and_swap_double) \
+ && defined(AO_HAVE_compare_and_swap_double_write)
+# define AO_compare_and_swap_double(addr,o1,n1,n2) \
+ AO_compare_and_swap_double_write(addr,o1,n1,n2)
+# define AO_HAVE_compare_and_swap_double
+#endif
+#if !defined(AO_HAVE_compare_and_swap_double) \
+ && defined(AO_HAVE_compare_and_swap_double_read)
+# define AO_compare_and_swap_double(addr,o1,n1,n2) \
+ AO_compare_and_swap_double_read(addr,o1,n1,n2)
+# define AO_HAVE_compare_and_swap_double
+#endif
+
+/* A full barrier before an acquire operation yields _full. */
+#if defined(AO_HAVE_compare_and_swap_double_acquire) \
+ && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_compare_and_swap_double_full)
+# define AO_compare_and_swap_double_full(addr,o1,n1,n2) \
+ (AO_nop_full(), AO_compare_and_swap_double_acquire(addr,o1,n1,n2))
+# define AO_HAVE_compare_and_swap_double_full
+#endif
+
+/* release_write defaults to _write, then _release; acquire_read */
+/* defaults to _read, then _acquire. */
+#if !defined(AO_HAVE_compare_and_swap_double_release_write) \
+ && defined(AO_HAVE_compare_and_swap_double_write)
+# define AO_compare_and_swap_double_release_write(addr,o1,n1,n2) \
+ AO_compare_and_swap_double_write(addr,o1,n1,n2)
+# define AO_HAVE_compare_and_swap_double_release_write
+#endif
+#if !defined(AO_HAVE_compare_and_swap_double_release_write) \
+ && defined(AO_HAVE_compare_and_swap_double_release)
+# define AO_compare_and_swap_double_release_write(addr,o1,n1,n2) \
+ AO_compare_and_swap_double_release(addr,o1,n1,n2)
+# define AO_HAVE_compare_and_swap_double_release_write
+#endif
+#if !defined(AO_HAVE_compare_and_swap_double_acquire_read) \
+ && defined(AO_HAVE_compare_and_swap_double_read)
+# define AO_compare_and_swap_double_acquire_read(addr,o1,n1,n2) \
+ AO_compare_and_swap_double_read(addr,o1,n1,n2)
+# define AO_HAVE_compare_and_swap_double_acquire_read
+#endif
+#if !defined(AO_HAVE_compare_and_swap_double_acquire_read) \
+ && defined(AO_HAVE_compare_and_swap_double_acquire)
+# define AO_compare_and_swap_double_acquire_read(addr,o1,n1,n2) \
+ AO_compare_and_swap_double_acquire(addr,o1,n1,n2)
+# define AO_HAVE_compare_and_swap_double_acquire_read
+#endif
+
+/* Without data-dependence ordering, dd_acquire_read requires a real */
+/* acquire_read barrier; otherwise the plain operation suffices. */
+#ifdef AO_NO_DD_ORDERING
+# if defined(AO_HAVE_compare_and_swap_double_acquire_read)
+# define AO_compare_and_swap_double_dd_acquire_read(addr,o1,n1,n2) \
+ AO_compare_and_swap_double_acquire_read(addr,o1,n1,n2)
+# define AO_HAVE_compare_and_swap_double_dd_acquire_read
+# endif
+#else
+# if defined(AO_HAVE_compare_and_swap_double)
+# define AO_compare_and_swap_double_dd_acquire_read(addr,o1,n1,n2) \
+ AO_compare_and_swap_double(addr,o1,n1,n2)
+# define AO_HAVE_compare_and_swap_double_dd_acquire_read
+# endif
+#endif
+
+/* Convenience wrappers taking whole AO_double_t values instead of */
+/* four separate AO_t halves; these are easier to type and read in */
+/* client code. Each one unpacks the AO_val1/AO_val2 fields and */
+/* forwards to the corresponding four-argument primitive. */
+#if defined(AO_HAVE_compare_double_and_swap_double) \
+ && !defined(AO_HAVE_double_compare_and_swap)
+ AO_INLINE int
+ AO_double_compare_and_swap(volatile AO_double_t *addr,
+ AO_double_t old_val, AO_double_t new_val)
+ {
+ return AO_compare_double_and_swap_double(addr,
+ old_val.AO_val1, old_val.AO_val2,
+ new_val.AO_val1, new_val.AO_val2);
+ }
+# define AO_HAVE_double_compare_and_swap
+#endif
+#if defined(AO_HAVE_compare_double_and_swap_double_release) \
+ && !defined(AO_HAVE_double_compare_and_swap_release)
+ AO_INLINE int
+ AO_double_compare_and_swap_release(volatile AO_double_t *addr,
+ AO_double_t old_val, AO_double_t new_val)
+ {
+ return AO_compare_double_and_swap_double_release(addr,
+ old_val.AO_val1, old_val.AO_val2,
+ new_val.AO_val1, new_val.AO_val2);
+ }
+# define AO_HAVE_double_compare_and_swap_release
+#endif
+#if defined(AO_HAVE_compare_double_and_swap_double_acquire) \
+ && !defined(AO_HAVE_double_compare_and_swap_acquire)
+ AO_INLINE int
+ AO_double_compare_and_swap_acquire(volatile AO_double_t *addr,
+ AO_double_t old_val, AO_double_t new_val)
+ {
+ return AO_compare_double_and_swap_double_acquire(addr,
+ old_val.AO_val1, old_val.AO_val2,
+ new_val.AO_val1, new_val.AO_val2);
+ }
+# define AO_HAVE_double_compare_and_swap_acquire
+#endif
+#if defined(AO_HAVE_compare_double_and_swap_double_read) \
+ && !defined(AO_HAVE_double_compare_and_swap_read)
+ AO_INLINE int
+ AO_double_compare_and_swap_read(volatile AO_double_t *addr,
+ AO_double_t old_val, AO_double_t new_val)
+ {
+ return AO_compare_double_and_swap_double_read(addr,
+ old_val.AO_val1, old_val.AO_val2,
+ new_val.AO_val1, new_val.AO_val2);
+ }
+# define AO_HAVE_double_compare_and_swap_read
+#endif
+#if defined(AO_HAVE_compare_double_and_swap_double_write) \
+ && !defined(AO_HAVE_double_compare_and_swap_write)
+ AO_INLINE int
+ AO_double_compare_and_swap_write(volatile AO_double_t *addr,
+ AO_double_t old_val, AO_double_t new_val)
+ {
+ return AO_compare_double_and_swap_double_write(addr,
+ old_val.AO_val1, old_val.AO_val2,
+ new_val.AO_val1, new_val.AO_val2);
+ }
+# define AO_HAVE_double_compare_and_swap_write
+#endif
+#if defined(AO_HAVE_compare_double_and_swap_double_release_write) \
+ && !defined(AO_HAVE_double_compare_and_swap_release_write)
+ AO_INLINE int
+ AO_double_compare_and_swap_release_write(volatile AO_double_t *addr,
+ AO_double_t old_val, AO_double_t new_val)
+ {
+ return AO_compare_double_and_swap_double_release_write(addr,
+ old_val.AO_val1, old_val.AO_val2,
+ new_val.AO_val1, new_val.AO_val2);
+ }
+# define AO_HAVE_double_compare_and_swap_release_write
+#endif
+#if defined(AO_HAVE_compare_double_and_swap_double_acquire_read) \
+ && !defined(AO_HAVE_double_compare_and_swap_acquire_read)
+ AO_INLINE int
+ AO_double_compare_and_swap_acquire_read(volatile AO_double_t *addr,
+ AO_double_t old_val, AO_double_t new_val)
+ {
+ return AO_compare_double_and_swap_double_acquire_read(addr,
+ old_val.AO_val1, old_val.AO_val2,
+ new_val.AO_val1, new_val.AO_val2);
+ }
+# define AO_HAVE_double_compare_and_swap_acquire_read
+#endif
+#if defined(AO_HAVE_compare_double_and_swap_double_full) \
+ && !defined(AO_HAVE_double_compare_and_swap_full)
+ AO_INLINE int
+ AO_double_compare_and_swap_full(volatile AO_double_t *addr,
+ AO_double_t old_val, AO_double_t new_val)
+ {
+ return AO_compare_double_and_swap_double_full(addr,
+ old_val.AO_val1, old_val.AO_val2,
+ new_val.AO_val1, new_val.AO_val2);
+ }
+# define AO_HAVE_double_compare_and_swap_full
+#endif
--- /dev/null
+There are two kinds of entities in this directory:
+
+- Subdirectories corresponding to specific compilers (or compiler/OS combinations).
+ Each of these includes one or more architecture-specific headers.
+
+- More generic header files corresponding to a particular ordering and/or
+ atomicity property that might be shared by multiple hardware platforms.
--- /dev/null
+/*
+ * Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Describes architectures on which volatile AO_t, unsigned char, */
+/* unsigned short, and unsigned int loads and stores have */
+/* acquire/release semantics for all normally legal alignments. */
+
+#include "loadstore/acquire_release_volatile.h"
+#include "loadstore/char_acquire_release_volatile.h"
+#include "loadstore/short_acquire_release_volatile.h"
+#include "loadstore/int_acquire_release_volatile.h"
--- /dev/null
+/*
+ * Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Describes architectures on which AO_t, unsigned char, unsigned */
+/* short, and unsigned int loads and stores are atomic but only if data */
+/* is suitably aligned. */
+
+#define AO_ACCESS_CHECK_ALIGNED
+/* A one-byte (char) access is always suitably aligned, so there is no */
+/* AO_ACCESS_char_CHECK_ALIGNED -- a "check for char" would be a */
+/* misnomer. */
+#define AO_ACCESS_short_CHECK_ALIGNED
+#define AO_ACCESS_int_CHECK_ALIGNED
+#include "all_atomic_load_store.h"
--- /dev/null
+/*
+ * Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Describes architectures on which AO_t, unsigned char, unsigned */
+/* short, and unsigned int loads and stores are atomic for all normally */
+/* legal alignments. */
+
+#include "all_atomic_only_load.h"
+
+#include "loadstore/atomic_store.h"
+#include "loadstore/char_atomic_store.h"
+#include "loadstore/short_atomic_store.h"
+#include "loadstore/int_atomic_store.h"
--- /dev/null
+/*
+ * Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Describes architectures on which AO_t, unsigned char, unsigned */
+/* short, and unsigned int loads are atomic for all normally legal */
+/* alignments. */
+
+#include "loadstore/atomic_load.h"
+#include "loadstore/char_atomic_load.h"
+#include "loadstore/short_atomic_load.h"
+#include "loadstore/int_atomic_load.h"
--- /dev/null
+/*
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Inclusion of this file signifies that AO_t is in fact int. */
+/* Hence any AO_... operation can also serve as AO_int_... operation. */
+/* Each wrapper casts the int pointer to (volatile AO_t *) and casts */
+/* value-returning results back to unsigned; this is valid only under */
+/* the AO_t-is-int assumption stated above. */
+
+#if defined(AO_HAVE_load) && !defined(AO_HAVE_int_load)
+# define AO_int_load(addr) \
+ (unsigned)AO_load((const volatile AO_t *)(addr))
+# define AO_HAVE_int_load
+#endif
+
+#if defined(AO_HAVE_store) && !defined(AO_HAVE_int_store)
+# define AO_int_store(addr, val) \
+ AO_store((volatile AO_t *)(addr), (AO_t)(val))
+# define AO_HAVE_int_store
+#endif
+
+#if defined(AO_HAVE_fetch_and_add) \
+ && !defined(AO_HAVE_int_fetch_and_add)
+# define AO_int_fetch_and_add(addr, incr) \
+ (unsigned)AO_fetch_and_add((volatile AO_t *)(addr), \
+ (AO_t)(incr))
+# define AO_HAVE_int_fetch_and_add
+#endif
+
+#if defined(AO_HAVE_fetch_and_add1) \
+ && !defined(AO_HAVE_int_fetch_and_add1)
+# define AO_int_fetch_and_add1(addr) \
+ (unsigned)AO_fetch_and_add1((volatile AO_t *)(addr))
+# define AO_HAVE_int_fetch_and_add1
+#endif
+
+#if defined(AO_HAVE_fetch_and_sub1) \
+ && !defined(AO_HAVE_int_fetch_and_sub1)
+# define AO_int_fetch_and_sub1(addr) \
+ (unsigned)AO_fetch_and_sub1((volatile AO_t *)(addr))
+# define AO_HAVE_int_fetch_and_sub1
+#endif
+
+#if defined(AO_HAVE_and) && !defined(AO_HAVE_int_and)
+# define AO_int_and(addr, val) \
+ AO_and((volatile AO_t *)(addr), (AO_t)(val))
+# define AO_HAVE_int_and
+#endif
+
+#if defined(AO_HAVE_or) && !defined(AO_HAVE_int_or)
+# define AO_int_or(addr, val) \
+ AO_or((volatile AO_t *)(addr), (AO_t)(val))
+# define AO_HAVE_int_or
+#endif
+
+#if defined(AO_HAVE_xor) && !defined(AO_HAVE_int_xor)
+# define AO_int_xor(addr, val) \
+ AO_xor((volatile AO_t *)(addr), (AO_t)(val))
+# define AO_HAVE_int_xor
+#endif
+
+#if defined(AO_HAVE_fetch_compare_and_swap) \
+ && !defined(AO_HAVE_int_fetch_compare_and_swap)
+# define AO_int_fetch_compare_and_swap(addr, old, new_val) \
+ (unsigned)AO_fetch_compare_and_swap((volatile AO_t *)(addr), \
+ (AO_t)(old), (AO_t)(new_val))
+# define AO_HAVE_int_fetch_compare_and_swap
+#endif
+
+#if defined(AO_HAVE_compare_and_swap) \
+ && !defined(AO_HAVE_int_compare_and_swap)
+# define AO_int_compare_and_swap(addr, old, new_val) \
+ AO_compare_and_swap((volatile AO_t *)(addr), \
+ (AO_t)(old), (AO_t)(new_val))
+# define AO_HAVE_int_compare_and_swap
+#endif
+/*
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Inclusion of this file signifies that AO_t is in fact int. */
+/* Hence any AO_... operation can also serve as AO_int_... operation. */
+/* Same cast-based mappings as the plain AO_int_* ones above, but for */
+/* the _full (strongest ordering) variants. */
+
+#if defined(AO_HAVE_load_full) && !defined(AO_HAVE_int_load_full)
+# define AO_int_load_full(addr) \
+ (unsigned)AO_load_full((const volatile AO_t *)(addr))
+# define AO_HAVE_int_load_full
+#endif
+
+#if defined(AO_HAVE_store_full) && !defined(AO_HAVE_int_store_full)
+# define AO_int_store_full(addr, val) \
+ AO_store_full((volatile AO_t *)(addr), (AO_t)(val))
+# define AO_HAVE_int_store_full
+#endif
+
+#if defined(AO_HAVE_fetch_and_add_full) \
+ && !defined(AO_HAVE_int_fetch_and_add_full)
+# define AO_int_fetch_and_add_full(addr, incr) \
+ (unsigned)AO_fetch_and_add_full((volatile AO_t *)(addr), \
+ (AO_t)(incr))
+# define AO_HAVE_int_fetch_and_add_full
+#endif
+
+#if defined(AO_HAVE_fetch_and_add1_full) \
+ && !defined(AO_HAVE_int_fetch_and_add1_full)
+# define AO_int_fetch_and_add1_full(addr) \
+ (unsigned)AO_fetch_and_add1_full((volatile AO_t *)(addr))
+# define AO_HAVE_int_fetch_and_add1_full
+#endif
+
+#if defined(AO_HAVE_fetch_and_sub1_full) \
+ && !defined(AO_HAVE_int_fetch_and_sub1_full)
+# define AO_int_fetch_and_sub1_full(addr) \
+ (unsigned)AO_fetch_and_sub1_full((volatile AO_t *)(addr))
+# define AO_HAVE_int_fetch_and_sub1_full
+#endif
+
+#if defined(AO_HAVE_and_full) && !defined(AO_HAVE_int_and_full)
+# define AO_int_and_full(addr, val) \
+ AO_and_full((volatile AO_t *)(addr), (AO_t)(val))
+# define AO_HAVE_int_and_full
+#endif
+
+#if defined(AO_HAVE_or_full) && !defined(AO_HAVE_int_or_full)
+# define AO_int_or_full(addr, val) \
+ AO_or_full((volatile AO_t *)(addr), (AO_t)(val))
+# define AO_HAVE_int_or_full
+#endif
+
+#if defined(AO_HAVE_xor_full) && !defined(AO_HAVE_int_xor_full)
+# define AO_int_xor_full(addr, val) \
+ AO_xor_full((volatile AO_t *)(addr), (AO_t)(val))
+# define AO_HAVE_int_xor_full
+#endif
+
+#if defined(AO_HAVE_fetch_compare_and_swap_full) \
+ && !defined(AO_HAVE_int_fetch_compare_and_swap_full)
+# define AO_int_fetch_compare_and_swap_full(addr, old, new_val) \
+ (unsigned)AO_fetch_compare_and_swap_full((volatile AO_t *)(addr), \
+ (AO_t)(old), (AO_t)(new_val))
+# define AO_HAVE_int_fetch_compare_and_swap_full
+#endif
+
+#if defined(AO_HAVE_compare_and_swap_full) \
+ && !defined(AO_HAVE_int_compare_and_swap_full)
+# define AO_int_compare_and_swap_full(addr, old, new_val) \
+ AO_compare_and_swap_full((volatile AO_t *)(addr), \
+ (AO_t)(old), (AO_t)(new_val))
+# define AO_HAVE_int_compare_and_swap_full
+#endif
+/*
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Inclusion of this file signifies that AO_t is in fact int. */
+/* Hence any AO_... operation can also serve as AO_int_... operation. */
+/* Same cast-based mappings as the plain AO_int_* ones above, but for */
+/* the _acquire variants. */
+
+#if defined(AO_HAVE_load_acquire) && !defined(AO_HAVE_int_load_acquire)
+# define AO_int_load_acquire(addr) \
+ (unsigned)AO_load_acquire((const volatile AO_t *)(addr))
+# define AO_HAVE_int_load_acquire
+#endif
+
+#if defined(AO_HAVE_store_acquire) && !defined(AO_HAVE_int_store_acquire)
+# define AO_int_store_acquire(addr, val) \
+ AO_store_acquire((volatile AO_t *)(addr), (AO_t)(val))
+# define AO_HAVE_int_store_acquire
+#endif
+
+#if defined(AO_HAVE_fetch_and_add_acquire) \
+ && !defined(AO_HAVE_int_fetch_and_add_acquire)
+# define AO_int_fetch_and_add_acquire(addr, incr) \
+ (unsigned)AO_fetch_and_add_acquire((volatile AO_t *)(addr), \
+ (AO_t)(incr))
+# define AO_HAVE_int_fetch_and_add_acquire
+#endif
+
+#if defined(AO_HAVE_fetch_and_add1_acquire) \
+ && !defined(AO_HAVE_int_fetch_and_add1_acquire)
+# define AO_int_fetch_and_add1_acquire(addr) \
+ (unsigned)AO_fetch_and_add1_acquire((volatile AO_t *)(addr))
+# define AO_HAVE_int_fetch_and_add1_acquire
+#endif
+
+#if defined(AO_HAVE_fetch_and_sub1_acquire) \
+ && !defined(AO_HAVE_int_fetch_and_sub1_acquire)
+# define AO_int_fetch_and_sub1_acquire(addr) \
+ (unsigned)AO_fetch_and_sub1_acquire((volatile AO_t *)(addr))
+# define AO_HAVE_int_fetch_and_sub1_acquire
+#endif
+
+#if defined(AO_HAVE_and_acquire) && !defined(AO_HAVE_int_and_acquire)
+# define AO_int_and_acquire(addr, val) \
+ AO_and_acquire((volatile AO_t *)(addr), (AO_t)(val))
+# define AO_HAVE_int_and_acquire
+#endif
+
+#if defined(AO_HAVE_or_acquire) && !defined(AO_HAVE_int_or_acquire)
+# define AO_int_or_acquire(addr, val) \
+ AO_or_acquire((volatile AO_t *)(addr), (AO_t)(val))
+# define AO_HAVE_int_or_acquire
+#endif
+
+#if defined(AO_HAVE_xor_acquire) && !defined(AO_HAVE_int_xor_acquire)
+# define AO_int_xor_acquire(addr, val) \
+ AO_xor_acquire((volatile AO_t *)(addr), (AO_t)(val))
+# define AO_HAVE_int_xor_acquire
+#endif
+
+#if defined(AO_HAVE_fetch_compare_and_swap_acquire) \
+ && !defined(AO_HAVE_int_fetch_compare_and_swap_acquire)
+# define AO_int_fetch_compare_and_swap_acquire(addr, old, new_val) \
+ (unsigned)AO_fetch_compare_and_swap_acquire((volatile AO_t *)(addr), \
+ (AO_t)(old), (AO_t)(new_val))
+# define AO_HAVE_int_fetch_compare_and_swap_acquire
+#endif
+
+#if defined(AO_HAVE_compare_and_swap_acquire) \
+ && !defined(AO_HAVE_int_compare_and_swap_acquire)
+# define AO_int_compare_and_swap_acquire(addr, old, new_val) \
+ AO_compare_and_swap_acquire((volatile AO_t *)(addr), \
+ (AO_t)(old), (AO_t)(new_val))
+# define AO_HAVE_int_compare_and_swap_acquire
+#endif
+/*
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Inclusion of this file signifies that AO_t is in fact int. */
+/* Hence any AO_... operation can also serve as AO_int_... operation. */
+
+#if defined(AO_HAVE_load_release) && !defined(AO_HAVE_int_load_release)
+# define AO_int_load_release(addr) \
+ (unsigned)AO_load_release((const volatile AO_t *)(addr))
+# define AO_HAVE_int_load_release
+#endif
+
+#if defined(AO_HAVE_store_release) && !defined(AO_HAVE_int_store_release)
+# define AO_int_store_release(addr, val) \
+ AO_store_release((volatile AO_t *)(addr), (AO_t)(val))
+# define AO_HAVE_int_store_release
+#endif
+
+#if defined(AO_HAVE_fetch_and_add_release) \
+ && !defined(AO_HAVE_int_fetch_and_add_release)
+# define AO_int_fetch_and_add_release(addr, incr) \
+ (unsigned)AO_fetch_and_add_release((volatile AO_t *)(addr), \
+ (AO_t)(incr))
+# define AO_HAVE_int_fetch_and_add_release
+#endif
+
+#if defined(AO_HAVE_fetch_and_add1_release) \
+ && !defined(AO_HAVE_int_fetch_and_add1_release)
+# define AO_int_fetch_and_add1_release(addr) \
+ (unsigned)AO_fetch_and_add1_release((volatile AO_t *)(addr))
+# define AO_HAVE_int_fetch_and_add1_release
+#endif
+
+#if defined(AO_HAVE_fetch_and_sub1_release) \
+ && !defined(AO_HAVE_int_fetch_and_sub1_release)
+# define AO_int_fetch_and_sub1_release(addr) \
+ (unsigned)AO_fetch_and_sub1_release((volatile AO_t *)(addr))
+# define AO_HAVE_int_fetch_and_sub1_release
+#endif
+
+#if defined(AO_HAVE_and_release) && !defined(AO_HAVE_int_and_release)
+# define AO_int_and_release(addr, val) \
+ AO_and_release((volatile AO_t *)(addr), (AO_t)(val))
+# define AO_HAVE_int_and_release
+#endif
+
+#if defined(AO_HAVE_or_release) && !defined(AO_HAVE_int_or_release)
+# define AO_int_or_release(addr, val) \
+ AO_or_release((volatile AO_t *)(addr), (AO_t)(val))
+# define AO_HAVE_int_or_release
+#endif
+
+#if defined(AO_HAVE_xor_release) && !defined(AO_HAVE_int_xor_release)
+# define AO_int_xor_release(addr, val) \
+ AO_xor_release((volatile AO_t *)(addr), (AO_t)(val))
+# define AO_HAVE_int_xor_release
+#endif
+
+#if defined(AO_HAVE_fetch_compare_and_swap_release) \
+ && !defined(AO_HAVE_int_fetch_compare_and_swap_release)
+# define AO_int_fetch_compare_and_swap_release(addr, old, new_val) \
+ (unsigned)AO_fetch_compare_and_swap_release((volatile AO_t *)(addr), \
+ (AO_t)(old), (AO_t)(new_val))
+# define AO_HAVE_int_fetch_compare_and_swap_release
+#endif
+
+#if defined(AO_HAVE_compare_and_swap_release) \
+ && !defined(AO_HAVE_int_compare_and_swap_release)
+# define AO_int_compare_and_swap_release(addr, old, new_val) \
+ AO_compare_and_swap_release((volatile AO_t *)(addr), \
+ (AO_t)(old), (AO_t)(new_val))
+# define AO_HAVE_int_compare_and_swap_release
+#endif
+/*
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Inclusion of this file signifies that AO_t is in fact int. */
+/* Hence any AO_... operation can also serve as AO_int_... operation. */
+
+#if defined(AO_HAVE_load_write) && !defined(AO_HAVE_int_load_write)
+# define AO_int_load_write(addr) \
+ (unsigned)AO_load_write((const volatile AO_t *)(addr))
+# define AO_HAVE_int_load_write
+#endif
+
+#if defined(AO_HAVE_store_write) && !defined(AO_HAVE_int_store_write)
+# define AO_int_store_write(addr, val) \
+ AO_store_write((volatile AO_t *)(addr), (AO_t)(val))
+# define AO_HAVE_int_store_write
+#endif
+
+#if defined(AO_HAVE_fetch_and_add_write) \
+ && !defined(AO_HAVE_int_fetch_and_add_write)
+# define AO_int_fetch_and_add_write(addr, incr) \
+ (unsigned)AO_fetch_and_add_write((volatile AO_t *)(addr), \
+ (AO_t)(incr))
+# define AO_HAVE_int_fetch_and_add_write
+#endif
+
+#if defined(AO_HAVE_fetch_and_add1_write) \
+ && !defined(AO_HAVE_int_fetch_and_add1_write)
+# define AO_int_fetch_and_add1_write(addr) \
+ (unsigned)AO_fetch_and_add1_write((volatile AO_t *)(addr))
+# define AO_HAVE_int_fetch_and_add1_write
+#endif
+
+#if defined(AO_HAVE_fetch_and_sub1_write) \
+ && !defined(AO_HAVE_int_fetch_and_sub1_write)
+# define AO_int_fetch_and_sub1_write(addr) \
+ (unsigned)AO_fetch_and_sub1_write((volatile AO_t *)(addr))
+# define AO_HAVE_int_fetch_and_sub1_write
+#endif
+
+#if defined(AO_HAVE_and_write) && !defined(AO_HAVE_int_and_write)
+# define AO_int_and_write(addr, val) \
+ AO_and_write((volatile AO_t *)(addr), (AO_t)(val))
+# define AO_HAVE_int_and_write
+#endif
+
+#if defined(AO_HAVE_or_write) && !defined(AO_HAVE_int_or_write)
+# define AO_int_or_write(addr, val) \
+ AO_or_write((volatile AO_t *)(addr), (AO_t)(val))
+# define AO_HAVE_int_or_write
+#endif
+
+#if defined(AO_HAVE_xor_write) && !defined(AO_HAVE_int_xor_write)
+# define AO_int_xor_write(addr, val) \
+ AO_xor_write((volatile AO_t *)(addr), (AO_t)(val))
+# define AO_HAVE_int_xor_write
+#endif
+
+#if defined(AO_HAVE_fetch_compare_and_swap_write) \
+ && !defined(AO_HAVE_int_fetch_compare_and_swap_write)
+# define AO_int_fetch_compare_and_swap_write(addr, old, new_val) \
+ (unsigned)AO_fetch_compare_and_swap_write((volatile AO_t *)(addr), \
+ (AO_t)(old), (AO_t)(new_val))
+# define AO_HAVE_int_fetch_compare_and_swap_write
+#endif
+
+#if defined(AO_HAVE_compare_and_swap_write) \
+ && !defined(AO_HAVE_int_compare_and_swap_write)
+# define AO_int_compare_and_swap_write(addr, old, new_val) \
+ AO_compare_and_swap_write((volatile AO_t *)(addr), \
+ (AO_t)(old), (AO_t)(new_val))
+# define AO_HAVE_int_compare_and_swap_write
+#endif
+/*
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Inclusion of this file signifies that AO_t is in fact int. */
+/* Hence any AO_... operation can also serve as AO_int_... operation. */
+
+#if defined(AO_HAVE_load_read) && !defined(AO_HAVE_int_load_read)
+# define AO_int_load_read(addr) \
+ (unsigned)AO_load_read((const volatile AO_t *)(addr))
+# define AO_HAVE_int_load_read
+#endif
+
+#if defined(AO_HAVE_store_read) && !defined(AO_HAVE_int_store_read)
+# define AO_int_store_read(addr, val) \
+ AO_store_read((volatile AO_t *)(addr), (AO_t)(val))
+# define AO_HAVE_int_store_read
+#endif
+
+#if defined(AO_HAVE_fetch_and_add_read) \
+ && !defined(AO_HAVE_int_fetch_and_add_read)
+# define AO_int_fetch_and_add_read(addr, incr) \
+ (unsigned)AO_fetch_and_add_read((volatile AO_t *)(addr), \
+ (AO_t)(incr))
+# define AO_HAVE_int_fetch_and_add_read
+#endif
+
+#if defined(AO_HAVE_fetch_and_add1_read) \
+ && !defined(AO_HAVE_int_fetch_and_add1_read)
+# define AO_int_fetch_and_add1_read(addr) \
+ (unsigned)AO_fetch_and_add1_read((volatile AO_t *)(addr))
+# define AO_HAVE_int_fetch_and_add1_read
+#endif
+
+#if defined(AO_HAVE_fetch_and_sub1_read) \
+ && !defined(AO_HAVE_int_fetch_and_sub1_read)
+# define AO_int_fetch_and_sub1_read(addr) \
+ (unsigned)AO_fetch_and_sub1_read((volatile AO_t *)(addr))
+# define AO_HAVE_int_fetch_and_sub1_read
+#endif
+
+#if defined(AO_HAVE_and_read) && !defined(AO_HAVE_int_and_read)
+# define AO_int_and_read(addr, val) \
+ AO_and_read((volatile AO_t *)(addr), (AO_t)(val))
+# define AO_HAVE_int_and_read
+#endif
+
+#if defined(AO_HAVE_or_read) && !defined(AO_HAVE_int_or_read)
+# define AO_int_or_read(addr, val) \
+ AO_or_read((volatile AO_t *)(addr), (AO_t)(val))
+# define AO_HAVE_int_or_read
+#endif
+
+#if defined(AO_HAVE_xor_read) && !defined(AO_HAVE_int_xor_read)
+# define AO_int_xor_read(addr, val) \
+ AO_xor_read((volatile AO_t *)(addr), (AO_t)(val))
+# define AO_HAVE_int_xor_read
+#endif
+
+#if defined(AO_HAVE_fetch_compare_and_swap_read) \
+ && !defined(AO_HAVE_int_fetch_compare_and_swap_read)
+# define AO_int_fetch_compare_and_swap_read(addr, old, new_val) \
+ (unsigned)AO_fetch_compare_and_swap_read((volatile AO_t *)(addr), \
+ (AO_t)(old), (AO_t)(new_val))
+# define AO_HAVE_int_fetch_compare_and_swap_read
+#endif
+
+#if defined(AO_HAVE_compare_and_swap_read) \
+ && !defined(AO_HAVE_int_compare_and_swap_read)
+# define AO_int_compare_and_swap_read(addr, old, new_val) \
+ AO_compare_and_swap_read((volatile AO_t *)(addr), \
+ (AO_t)(old), (AO_t)(new_val))
+# define AO_HAVE_int_compare_and_swap_read
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Inclusion of this file signifies that AO_t is in fact int. */
+/* Hence any AO_... operation can also serve as AO_int_... operation. */
+
+#if defined(AO_HAVE_load_XBAR) && !defined(AO_HAVE_int_load_XBAR)
+# define AO_int_load_XBAR(addr) \
+ (unsigned)AO_load_XBAR((const volatile AO_t *)(addr))
+# define AO_HAVE_int_load_XBAR
+#endif
+
+#if defined(AO_HAVE_store_XBAR) && !defined(AO_HAVE_int_store_XBAR)
+# define AO_int_store_XBAR(addr, val) \
+ AO_store_XBAR((volatile AO_t *)(addr), (AO_t)(val))
+# define AO_HAVE_int_store_XBAR
+#endif
+
+#if defined(AO_HAVE_fetch_and_add_XBAR) \
+ && !defined(AO_HAVE_int_fetch_and_add_XBAR)
+# define AO_int_fetch_and_add_XBAR(addr, incr) \
+ (unsigned)AO_fetch_and_add_XBAR((volatile AO_t *)(addr), \
+ (AO_t)(incr))
+# define AO_HAVE_int_fetch_and_add_XBAR
+#endif
+
+#if defined(AO_HAVE_fetch_and_add1_XBAR) \
+ && !defined(AO_HAVE_int_fetch_and_add1_XBAR)
+# define AO_int_fetch_and_add1_XBAR(addr) \
+ (unsigned)AO_fetch_and_add1_XBAR((volatile AO_t *)(addr))
+# define AO_HAVE_int_fetch_and_add1_XBAR
+#endif
+
+#if defined(AO_HAVE_fetch_and_sub1_XBAR) \
+ && !defined(AO_HAVE_int_fetch_and_sub1_XBAR)
+# define AO_int_fetch_and_sub1_XBAR(addr) \
+ (unsigned)AO_fetch_and_sub1_XBAR((volatile AO_t *)(addr))
+# define AO_HAVE_int_fetch_and_sub1_XBAR
+#endif
+
+#if defined(AO_HAVE_and_XBAR) && !defined(AO_HAVE_int_and_XBAR)
+# define AO_int_and_XBAR(addr, val) \
+ AO_and_XBAR((volatile AO_t *)(addr), (AO_t)(val))
+# define AO_HAVE_int_and_XBAR
+#endif
+
+#if defined(AO_HAVE_or_XBAR) && !defined(AO_HAVE_int_or_XBAR)
+# define AO_int_or_XBAR(addr, val) \
+ AO_or_XBAR((volatile AO_t *)(addr), (AO_t)(val))
+# define AO_HAVE_int_or_XBAR
+#endif
+
+#if defined(AO_HAVE_xor_XBAR) && !defined(AO_HAVE_int_xor_XBAR)
+# define AO_int_xor_XBAR(addr, val) \
+ AO_xor_XBAR((volatile AO_t *)(addr), (AO_t)(val))
+# define AO_HAVE_int_xor_XBAR
+#endif
+
+#if defined(AO_HAVE_fetch_compare_and_swap_XBAR) \
+ && !defined(AO_HAVE_int_fetch_compare_and_swap_XBAR)
+# define AO_int_fetch_compare_and_swap_XBAR(addr, old, new_val) \
+ (unsigned)AO_fetch_compare_and_swap_XBAR((volatile AO_t *)(addr), \
+ (AO_t)(old), (AO_t)(new_val))
+# define AO_HAVE_int_fetch_compare_and_swap_XBAR
+#endif
+
+#if defined(AO_HAVE_compare_and_swap_XBAR) \
+ && !defined(AO_HAVE_int_compare_and_swap_XBAR)
+# define AO_int_compare_and_swap_XBAR(addr, old, new_val) \
+ AO_compare_and_swap_XBAR((volatile AO_t *)(addr), \
+ (AO_t)(old), (AO_t)(new_val))
+# define AO_HAVE_int_compare_and_swap_XBAR
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2007 by NEC LE-IT: All rights reserved.
+ * A transcription of ARMv6 atomic operations for the ARM Realview Toolchain.
+ * This code works with armcc from RVDS 3.1
+ * This is based on work in gcc/arm.h by
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+#include "../test_and_set_t_is_ao_t.h" /* Probably suboptimal */
+
+#if __TARGET_ARCH_ARM < 6
+Dont use with ARM instruction sets lower than v6
+#else
+
+#define AO_ACCESS_CHECK_ALIGNED
+#define AO_ACCESS_short_CHECK_ALIGNED
+#define AO_ACCESS_int_CHECK_ALIGNED
+#include "../all_atomic_only_load.h"
+
+#include "../standard_ao_double_t.h"
+
+/* NEC LE-IT: ARMv6 is the first architecture providing support for simple LL/SC
+ * A data memory barrier must be raised via CP15 command (see documentation).
+ *
+ * ARMv7 is compatible with ARMv6 but has a simpler command for issuing a
+ * memory barrier (DMB). Raising it via CP15 should still work, as told to me
+ * by the support engineers. If it turns out to be much quicker, then we should
+ * implement custom code for ARMv7 using the asm { dmb } command.
+ *
+ * If only a single processor is used, we can define AO_UNIPROCESSOR
+ * and do not need to access CP15 for ensuring a DMB at all.
+*/
+
+AO_INLINE void
+AO_nop_full(void)
+{
+# ifndef AO_UNIPROCESSOR
+ unsigned int dest=0;
+ /* Issue a data memory barrier (keeps ordering of memory transactions */
+ /* before and after this operation) via the CP15 coprocessor. */
+ __asm {
+ mcr p15,0,dest,c7,c10,5
+ };
+# else
+ /* Single CPU: no hardware barrier needed, only stop the compiler */
+ /* from reordering across this point. */
+ AO_compiler_barrier();
+# endif
+}
+#define AO_HAVE_nop_full
+
+/* NEC LE-IT: atomic "store" - according to ARM documentation this is
+ * the only safe way to set variables also used in an LL/SC environment.
+ * A direct write won't be recognized by the LL/SC construct on other CPUs.
+ *
+ * HB: Based on subsequent discussion, I think it would be OK to use an
+ * ordinary store here if we knew that interrupt handlers always cleared
+ * the reservation. They should, but there is some doubt that this is
+ * currently always the case for e.g. Linux.
+*/
+AO_INLINE void AO_store(volatile AO_t *addr, AO_t value)
+{
+ unsigned long tmp;
+
+ /* LL/SC loop: STREX only succeeds while this CPU still holds the */
+ /* exclusive reservation taken by LDREX; on failure (tmp != 0) the */
+ /* whole sequence is retried. */
+retry:
+__asm {
+ ldrex tmp, [addr]
+ strex tmp, value, [addr]
+ teq tmp, #0
+ bne retry
+ };
+}
+#define AO_HAVE_store
+
+/* NEC LE-IT: replace the SWAP as recommended by ARM:
+
+ "Applies to: ARM11 Cores
+ Though the SWP instruction will still work with ARM V6 cores, it is recommended
+ to use the new V6 synchronization instructions. The SWP instruction produces
+ locked read and write accesses which are atomic, i.e. another operation cannot
+ be done between these locked accesses which ties up external bus (AHB,AXI)
+ bandwidth and can increase worst case interrupt latencies. LDREX,STREX are
+ more flexible, other instructions can be done between the LDREX and STREX accesses.
+ "
+*/
+#ifndef AO_PREFER_GENERALIZED
+AO_INLINE AO_TS_VAL_t
+AO_test_and_set(volatile AO_TS_t *addr) {
+
+ AO_TS_VAL_t oldval;
+ unsigned long tmp;
+ unsigned long one = 1;
+ /* Atomically set *addr to 1 via an LDREX/STREX loop and return the */
+ /* previous value; retried while the exclusive reservation is lost */
+ /* (tmp != 0 after STREX). */
+retry:
+__asm {
+ ldrex oldval, [addr]
+ strex tmp, one, [addr]
+ teq tmp, #0
+ bne retry
+ }
+
+ return oldval;
+}
+#define AO_HAVE_test_and_set
+
+/* Atomically add incr to *p via an LDREX/STREX loop; returns the */
+/* value of *p before the addition. tmp2 holds the STREX status */
+/* (nonzero means the reservation was lost, so retry). */
+AO_INLINE AO_t
+AO_fetch_and_add(volatile AO_t *p, AO_t incr)
+{
+ unsigned long tmp,tmp2;
+ AO_t result;
+
+retry:
+__asm {
+ ldrex result, [p]
+ add tmp, incr, result
+ strex tmp2, tmp, [p]
+ teq tmp2, #0
+ bne retry
+ }
+
+ return result;
+}
+#define AO_HAVE_fetch_and_add
+
+/* Same as AO_fetch_and_add but with a constant increment of 1. */
+AO_INLINE AO_t
+AO_fetch_and_add1(volatile AO_t *p)
+{
+ unsigned long tmp,tmp2;
+ AO_t result;
+
+retry:
+__asm {
+ ldrex result, [p]
+ add tmp, result, #1
+ strex tmp2, tmp, [p]
+ teq tmp2, #0
+ bne retry
+ }
+
+ return result;
+}
+#define AO_HAVE_fetch_and_add1
+
+/* Same as AO_fetch_and_add but with a constant decrement of 1. */
+AO_INLINE AO_t
+AO_fetch_and_sub1(volatile AO_t *p)
+{
+ unsigned long tmp,tmp2;
+ AO_t result;
+
+retry:
+__asm {
+ ldrex result, [p]
+ sub tmp, result, #1
+ strex tmp2, tmp, [p]
+ teq tmp2, #0
+ bne retry
+ }
+
+ return result;
+}
+#define AO_HAVE_fetch_and_sub1
+#endif /* !AO_PREFER_GENERALIZED */
+
+#ifndef AO_GENERALIZE_ASM_BOOL_CAS
+ /* Returns nonzero if the comparison succeeded. */
+ /* "result" is preset to 2 so the three outcomes are distinguishable: */
+ /* a failed comparison leaves 2 (no store attempted), a successful */
+ /* STREXEQ writes 0, and a lost reservation writes 1 (retry). */
+ /* Use __asm (not GCC-style __asm__) for consistency with the RVCT */
+ /* embedded assembler used throughout this armcc-specific file. */
+ AO_INLINE int
+ AO_compare_and_swap(volatile AO_t *addr, AO_t old_val, AO_t new_val)
+ {
+ AO_t result, tmp;
+
+ retry:
+ __asm {
+ mov result, #2
+ ldrex tmp, [addr]
+ teq tmp, old_val
+# ifdef __thumb__
+ it eq
+# endif
+ strexeq result, new_val, [addr]
+ teq result, #1
+ beq retry
+ }
+ return !(result&2);
+ }
+# define AO_HAVE_compare_and_swap
+#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
+
+/* Returns the value of *addr observed by LDREX; the store is only */
+/* attempted (STREXEQ) when that value equals old_val. "tmp" is */
+/* preset to 2 so a failed comparison is distinguishable from a lost */
+/* reservation (STREXEQ status 1, which triggers a retry). */
+/* Use __asm (not GCC-style __asm__) for consistency with the RVCT */
+/* embedded assembler used throughout this armcc-specific file. */
+AO_INLINE AO_t
+AO_fetch_compare_and_swap(volatile AO_t *addr, AO_t old_val, AO_t new_val)
+{
+ AO_t fetched_val, tmp;
+
+retry:
+__asm {
+ mov tmp, #2
+ ldrex fetched_val, [addr]
+ teq fetched_val, old_val
+# ifdef __thumb__
+ it eq
+# endif
+ strexeq tmp, new_val, [addr]
+ teq tmp, #1
+ beq retry
+ }
+ return fetched_val;
+}
+#define AO_HAVE_fetch_compare_and_swap
+
+/* helper functions for the Realview compiler: LDREXD is not usable
+ * with inline assembler, so use the "embedded" assembler as
+ * suggested by ARM Dev. support (June 2008). */
+/* Loads the 64-bit value at *addr and leaves an exclusive */
+/* reservation on it. NOTE(review): relies on AAPCS register mapping */
+/* (addr in r0, 64-bit result in r0:r1) -- confirm for the target ABI. */
+__asm inline double_ptr_storage AO_load_ex(const volatile AO_double_t *addr) {
+ LDREXD r0,r1,[r0]
+}
+
+/* Conditionally stores val1/val2 to *addr; returns the STREXD status */
+/* (0 on success, nonzero if the exclusive reservation was lost). */
+__asm inline int AO_store_ex(AO_t val1, AO_t val2, volatile AO_double_t *addr) {
+ STREXD r3,r0,r1,[r2]
+ MOV r0,r3
+}
+
+/* Atomic 64-bit load built on the LDREXD helper above. */
+AO_INLINE AO_double_t
+AO_double_load(const volatile AO_double_t *addr)
+{
+ AO_double_t result;
+
+ result.AO_whole = AO_load_ex(addr);
+ return result;
+}
+#define AO_HAVE_double_load
+
+/* Double-wide CAS built on the LDREXD/STREXD helpers: returns 1 if */
+/* the pair (old_val1, old_val2) was replaced by (new_val1, new_val2), */
+/* 0 if the current contents did not match. */
+AO_INLINE int
+AO_compare_double_and_swap_double(volatile AO_double_t *addr,
+ AO_t old_val1, AO_t old_val2,
+ AO_t new_val1, AO_t new_val2)
+{
+ /* NOTE(review): assumes double_ptr_storage is 64 bits wide with */
+ /* val1 in the low half -- confirm against standard_ao_double_t.h. */
+ double_ptr_storage old_val =
+ ((double_ptr_storage)old_val2 << 32) | old_val1;
+ double_ptr_storage tmp;
+ int result;
+
+ while(1) {
+ tmp = AO_load_ex(addr);
+ if(tmp != old_val) return 0; /* comparison failed */
+ result = AO_store_ex(new_val1, new_val2, addr);
+ if(!result) return 1; /* STREXD status 0 => store succeeded */
+ }
+}
+#define AO_HAVE_compare_double_and_swap_double
+
+#endif /* __TARGET_ARCH_ARM >= 6 */
+
+#define AO_T_IS_INT
--- /dev/null
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * Ensure, if at all possible, that AO_compare_and_swap_full() is
+ * available. The emulation should be brute-force signal-safe, even
+ * though it actually blocks.
+ * Including this file will generate an error if AO_compare_and_swap_full()
+ * cannot be made available.
+ * This will be included from platform-specific atomic_ops files
+ * if appropriate, and if AO_REQUIRE_CAS is defined. It should not be
+ * included directly, especially since it affects the implementation
+ * of other atomic update primitives.
+ * The implementation assumes that only AO_store_XXX and AO_test_and_set_XXX
+ * variants are defined, and that AO_test_and_set_XXX is not used to
+ * operate on compare_and_swap locations.
+ */
+
+#ifndef AO_ATOMIC_OPS_H
+# error This file should not be included directly.
+#endif
+
+#ifndef AO_HAVE_double_t
+# include "standard_ao_double_t.h"
+#endif
+
+AO_t AO_fetch_compare_and_swap_emulation(volatile AO_t *addr, AO_t old_val,
+ AO_t new_val);
+
+int AO_compare_double_and_swap_double_emulation(volatile AO_double_t *addr,
+ AO_t old_val1, AO_t old_val2,
+ AO_t new_val1, AO_t new_val2);
+
+void AO_store_full_emulation(volatile AO_t *addr, AO_t val);
+
+#ifndef AO_HAVE_fetch_compare_and_swap_full
+# define AO_fetch_compare_and_swap_full(addr, old, newval) \
+ AO_fetch_compare_and_swap_emulation(addr, old, newval)
+# define AO_HAVE_fetch_compare_and_swap_full
+#endif
+
+#ifndef AO_HAVE_compare_double_and_swap_double_full
+# define AO_compare_double_and_swap_double_full(addr, old1, old2, \
+ newval1, newval2) \
+ AO_compare_double_and_swap_double_emulation(addr, old1, old2, \
+ newval1, newval2)
+# define AO_HAVE_compare_double_and_swap_double_full
+#endif
+
+#undef AO_store
+#undef AO_HAVE_store
+#undef AO_store_write
+#undef AO_HAVE_store_write
+#undef AO_store_release
+#undef AO_HAVE_store_release
+#undef AO_store_full
+#undef AO_HAVE_store_full
+#define AO_store_full(addr, val) AO_store_full_emulation(addr, val)
+#define AO_HAVE_store_full
--- /dev/null
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+#include "../test_and_set_t_is_ao_t.h"
+
+#include "../standard_ao_double_t.h"
+
+#ifndef AO_UNIPROCESSOR
+ /* Store-store barrier: orders prior writes before subsequent */
+ /* writes within the inner-shareable domain ("dmb ishst"). */
+ AO_INLINE void
+ AO_nop_write(void)
+ {
+ /* TODO: Use C++11 primitive. */
+ __asm__ __volatile__("dmb ishst" : : : "memory");
+ }
+# define AO_HAVE_nop_write
+#endif
+
+/* TODO: Adjust version check on fixing double-wide AO support in GCC. */
+#if __GNUC__ == 4
+
+ /* Atomic 2-word (128-bit) load implemented as an LDXP/STXP pair; */
+ /* the loop repeats while the STXP status indicates the exclusive */
+ /* reservation was lost. */
+ AO_INLINE AO_double_t
+ AO_double_load(const volatile AO_double_t *addr)
+ {
+ AO_double_t result;
+ int status;
+
+ /* Note that STXP cannot be discarded because LD[A]XP is not */
+ /* single-copy atomic (unlike LDREXD for 32-bit ARM). */
+ do {
+ __asm__ __volatile__("//AO_double_load\n"
+ " ldxp %0, %1, %3\n"
+ " stxp %w2, %0, %1, %3"
+ : "=&r" (result.AO_val1), "=&r" (result.AO_val2), "=&r" (status)
+ : "Q" (*addr));
+ } while (AO_EXPECT_FALSE(status));
+ return result;
+ }
+# define AO_HAVE_double_load
+
+ /* Same as AO_double_load but uses LDAXP, which additionally gives */
+ /* acquire (load-acquire) ordering semantics. */
+ AO_INLINE AO_double_t
+ AO_double_load_acquire(const volatile AO_double_t *addr)
+ {
+ AO_double_t result;
+ int status;
+
+ do {
+ __asm__ __volatile__("//AO_double_load_acquire\n"
+ " ldaxp %0, %1, %3\n"
+ " stxp %w2, %0, %1, %3"
+ : "=&r" (result.AO_val1), "=&r" (result.AO_val2), "=&r" (status)
+ : "Q" (*addr));
+ } while (AO_EXPECT_FALSE(status));
+ return result;
+ }
+# define AO_HAVE_double_load_acquire
+
+ /* Atomic 2-word store via an LDXP/STXP loop (the loaded old value */
+ /* is discarded; the loop repeats while the reservation is lost). */
+ AO_INLINE void
+ AO_double_store(volatile AO_double_t *addr, AO_double_t value)
+ {
+ AO_double_t old_val;
+ int status;
+
+ do {
+ __asm__ __volatile__("//AO_double_store\n"
+ " ldxp %0, %1, %3\n"
+ " stxp %w2, %4, %5, %3"
+ : "=&r" (old_val.AO_val1), "=&r" (old_val.AO_val2), "=&r" (status),
+ "=Q" (*addr)
+ : "r" (value.AO_val1), "r" (value.AO_val2));
+ /* Compared to the arm.h implementation, the 'cc' (flags) are not */
+ /* clobbered because A64 has no concept of conditional execution. */
+ } while (AO_EXPECT_FALSE(status));
+ }
+# define AO_HAVE_double_store
+
+ /* Same as AO_double_store but uses STLXP, which additionally gives */
+ /* release (store-release) ordering semantics. */
+ AO_INLINE void
+ AO_double_store_release(volatile AO_double_t *addr, AO_double_t value)
+ {
+ AO_double_t old_val;
+ int status;
+
+ do {
+ __asm__ __volatile__("//AO_double_store_release\n"
+ " ldxp %0, %1, %3\n"
+ " stlxp %w2, %4, %5, %3"
+ : "=&r" (old_val.AO_val1), "=&r" (old_val.AO_val2), "=&r" (status),
+ "=Q" (*addr)
+ : "r" (value.AO_val1), "r" (value.AO_val2));
+ } while (AO_EXPECT_FALSE(status));
+ }
+# define AO_HAVE_double_store_release
+
+ /* Double-wide CAS via a split LDXP/STXP sequence: LDXP observes the */
+ /* current pair, and only if it equals old_val is STXP attempted. */
+ /* "result" is preset to 1 so that breaking out on a mismatch makes */
+ /* the function return 0; a successful STXP writes 0 (=> return 1), */
+ /* while a lost reservation also writes nonzero and causes a retry. */
+ /* The acquire/release/full variants below differ only in using */
+ /* LDAXP (acquire) and/or STLXP (release). */
+ AO_INLINE int
+ AO_double_compare_and_swap(volatile AO_double_t *addr,
+ AO_double_t old_val, AO_double_t new_val)
+ {
+ AO_double_t tmp;
+ int result = 1;
+
+ do {
+ __asm__ __volatile__("//AO_double_compare_and_swap\n"
+ " ldxp %0, %1, %2\n"
+ : "=&r" (tmp.AO_val1), "=&r" (tmp.AO_val2)
+ : "Q" (*addr));
+ if (tmp.AO_val1 != old_val.AO_val1 || tmp.AO_val2 != old_val.AO_val2)
+ break;
+ __asm__ __volatile__(
+ " stxp %w0, %2, %3, %1\n"
+ : "=&r" (result), "=Q" (*addr)
+ : "r" (new_val.AO_val1), "r" (new_val.AO_val2));
+ } while (AO_EXPECT_FALSE(result));
+ return !result;
+ }
+# define AO_HAVE_double_compare_and_swap
+
+ /* Acquire variant: LDAXP gives load-acquire ordering. */
+ AO_INLINE int
+ AO_double_compare_and_swap_acquire(volatile AO_double_t *addr,
+ AO_double_t old_val, AO_double_t new_val)
+ {
+ AO_double_t tmp;
+ int result = 1;
+
+ do {
+ __asm__ __volatile__("//AO_double_compare_and_swap_acquire\n"
+ " ldaxp %0, %1, %2\n"
+ : "=&r" (tmp.AO_val1), "=&r" (tmp.AO_val2)
+ : "Q" (*addr));
+ if (tmp.AO_val1 != old_val.AO_val1 || tmp.AO_val2 != old_val.AO_val2)
+ break;
+ __asm__ __volatile__(
+ " stxp %w0, %2, %3, %1\n"
+ : "=&r" (result), "=Q" (*addr)
+ : "r" (new_val.AO_val1), "r" (new_val.AO_val2));
+ } while (AO_EXPECT_FALSE(result));
+ return !result;
+ }
+# define AO_HAVE_double_compare_and_swap_acquire
+
+ /* Release variant: STLXP gives store-release ordering. */
+ AO_INLINE int
+ AO_double_compare_and_swap_release(volatile AO_double_t *addr,
+ AO_double_t old_val, AO_double_t new_val)
+ {
+ AO_double_t tmp;
+ int result = 1;
+
+ do {
+ __asm__ __volatile__("//AO_double_compare_and_swap_release\n"
+ " ldxp %0, %1, %2\n"
+ : "=&r" (tmp.AO_val1), "=&r" (tmp.AO_val2)
+ : "Q" (*addr));
+ if (tmp.AO_val1 != old_val.AO_val1 || tmp.AO_val2 != old_val.AO_val2)
+ break;
+ __asm__ __volatile__(
+ " stlxp %w0, %2, %3, %1\n"
+ : "=&r" (result), "=Q" (*addr)
+ : "r" (new_val.AO_val1), "r" (new_val.AO_val2));
+ } while (AO_EXPECT_FALSE(result));
+ return !result;
+ }
+# define AO_HAVE_double_compare_and_swap_release
+
+ /* Full variant: LDAXP + STLXP combine acquire and release ordering. */
+ AO_INLINE int
+ AO_double_compare_and_swap_full(volatile AO_double_t *addr,
+ AO_double_t old_val, AO_double_t new_val)
+ {
+ AO_double_t tmp;
+ int result = 1;
+
+ do {
+ __asm__ __volatile__("//AO_double_compare_and_swap_full\n"
+ " ldaxp %0, %1, %2\n"
+ : "=&r" (tmp.AO_val1), "=&r" (tmp.AO_val2)
+ : "Q" (*addr));
+ if (tmp.AO_val1 != old_val.AO_val1 || tmp.AO_val2 != old_val.AO_val2)
+ break;
+ __asm__ __volatile__(
+ " stlxp %w0, %2, %3, %1\n"
+ : "=&r" (result), "=Q" (*addr)
+ : "r" (new_val.AO_val1), "r" (new_val.AO_val2));
+ } while (AO_EXPECT_FALSE(result));
+ return !result;
+ }
+# define AO_HAVE_double_compare_and_swap_full
+#endif /* __GNUC__ == 4 */
+
+#include "generic.h"
--- /dev/null
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+#include "../loadstore/atomic_load.h"
+#include "../loadstore/atomic_store.h"
+
+#include "../test_and_set_t_is_ao_t.h"
+
+#define AO_NO_DD_ORDERING
+ /* Data dependence does not imply read ordering. */
+
+/* Full memory barrier ("mb" instruction). */
+AO_INLINE void
+AO_nop_full(void)
+{
+ __asm__ __volatile__("mb" : : : "memory");
+}
+#define AO_HAVE_nop_full
+
+/* Write (store-store) memory barrier ("wmb" instruction). */
+AO_INLINE void
+AO_nop_write(void)
+{
+ __asm__ __volatile__("wmb" : : : "memory");
+}
+#define AO_HAVE_nop_write
+
+/* mb should be used for AO_nop_read(). That's the default. */
+
+/* TODO: implement AO_fetch_and_add explicitly. */
+
+/* We believe that ldq_l ... stq_c does not imply any memory barrier. */
+/* LL/SC compare-and-swap: ldq_l takes a load-locked copy of *addr, */
+/* cmpeq records whether it equals "old" in was_equal, and on a */
+/* mismatch control branches to 2f returning 0. Otherwise stq_c */
+/* attempts the conditional store of new_val; if the reservation was */
+/* lost (stq_c writes 0 to %0) the sequence restarts at 1b. */
+AO_INLINE int
+AO_compare_and_swap(volatile AO_t *addr,
+ AO_t old, AO_t new_val)
+{
+ unsigned long was_equal;
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ "1: ldq_l %0,%1\n"
+ " cmpeq %0,%4,%2\n"
+ " mov %3,%0\n"
+ " beq %2,2f\n"
+ " stq_c %0,%1\n"
+ " beq %0,1b\n"
+ "2:\n"
+ : "=&r" (temp), "+m" (*addr), "=&r" (was_equal)
+ : "r" (new_val), "Ir" (old)
+ :"memory");
+ return (int)was_equal;
+}
+#define AO_HAVE_compare_and_swap
+
+/* TODO: implement AO_fetch_compare_and_swap */
--- /dev/null
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+#include "../test_and_set_t_is_ao_t.h" /* Probably suboptimal */
+
+#if defined(__thumb__) && !defined(__thumb2__)
+ /* Thumb One mode does not have ARM "mcr", "swp" and some load/store */
+ /* instructions, so we temporarily switch to ARM mode and go back */
+ /* afterwards (clobbering "r3" register). */
+# define AO_THUMB_GO_ARM \
+ " adr r3, 4f\n" \
+ " bx r3\n" \
+ " .align\n" \
+ " .arm\n" \
+ "4:\n"
+# define AO_THUMB_RESTORE_MODE \
+ " adr r3, 5f + 1\n" \
+ " bx r3\n" \
+ " .thumb\n" \
+ "5:\n"
+ /* Extra clobber-list entry (note the trailing comma) used by every */
+ /* asm statement that expands the two mode-switch macros above. */
+# define AO_THUMB_SWITCH_CLOBBERS "r3",
+#else
+# define AO_THUMB_GO_ARM /* empty */
+# define AO_THUMB_RESTORE_MODE /* empty */
+# define AO_THUMB_SWITCH_CLOBBERS /* empty */
+#endif /* !__thumb__ */
+
+/* NEC LE-IT: gcc has no way to easily check the arm architecture */
+/* but it defines only one (or several) of __ARM_ARCH_x__ to be true. */
+#if !defined(__ARM_ARCH_2__) && !defined(__ARM_ARCH_3__) \
+ && !defined(__ARM_ARCH_3M__) && !defined(__ARM_ARCH_4__) \
+ && !defined(__ARM_ARCH_4T__) \
+ && ((!defined(__ARM_ARCH_5__) && !defined(__ARM_ARCH_5E__) \
+ && !defined(__ARM_ARCH_5T__) && !defined(__ARM_ARCH_5TE__) \
+ && !defined(__ARM_ARCH_5TEJ__) && !defined(__ARM_ARCH_6M__)) \
+ || defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
+ || defined(__ARM_ARCH_8A__))
+ /* ARMv6 or newer (excluding ARMv6-M): word-sized LDREX/STREX exist. */
+# define AO_ARM_HAVE_LDREX
+# if !defined(__ARM_ARCH_6__) && !defined(__ARM_ARCH_6J__) \
+ && !defined(__ARM_ARCH_6T2__)
+ /* LDREXB/STREXB and LDREXH/STREXH are present in ARMv6K/Z+. */
+# define AO_ARM_HAVE_LDREXBH
+# endif
+# if !defined(__ARM_ARCH_6__) && !defined(__ARM_ARCH_6J__) \
+ && !defined(__ARM_ARCH_6T2__) && !defined(__ARM_ARCH_6Z__) \
+ && !defined(__ARM_ARCH_6ZT2__)
+# if !defined(__ARM_ARCH_6K__) && !defined(__ARM_ARCH_6ZK__)
+ /* DMB is present in ARMv6M and ARMv7+. */
+# define AO_ARM_HAVE_DMB
+# endif
+# if (!defined(__thumb__) \
+ || (defined(__thumb2__) && !defined(__ARM_ARCH_7__) \
+ && !defined(__ARM_ARCH_7M__) && !defined(__ARM_ARCH_7EM__))) \
+ && (!defined(__clang__) || (__clang_major__ > 3) \
+ || (__clang_major__ == 3 && __clang_minor__ >= 3))
+ /* LDREXD/STREXD present in ARMv6K/M+ (see gas/config/tc-arm.c). */
+ /* In the Thumb mode, this works only starting from ARMv7 (except */
+ /* for the base and 'M' models). Clang3.2 (and earlier) does not */
+ /* allocate register pairs for LDREXD/STREXD properly (besides, */
+ /* Clang3.1 does not support "%H<r>" operand specification). */
+# define AO_ARM_HAVE_LDREXD
+# endif /* !thumb || ARMv7A || ARMv7R+ */
+# endif /* ARMv7+ */
+#endif /* ARMv6+ */
+
+/* SWP availability: pre-ARMv8 ARM-mode targets only (Thumb-2 and */
+/* ARMv6-M cannot encode it; ARMv8 removes/deprecates it). */
+#if !defined(__ARM_ARCH_2__) && !defined(__ARM_ARCH_6M__) \
+ && !defined(__ARM_ARCH_8A__) && !defined(__thumb2__)
+# define AO_ARM_HAVE_SWP
+ /* Note: ARMv6M is excluded due to no ARM mode support. */
+ /* Also, SWP is obsoleted for ARMv8+. */
+#endif /* !__thumb2__ */
+
+/* Memory-barrier primitives; the cheapest available variant is chosen */
+/* per target: compiler barrier only (uniprocessor), DMB (ARMv7+), or */
+/* the CP15 c7/c10/5 coprocessor write (ARMv6). */
+#ifdef AO_UNIPROCESSOR
+ /* If only a single processor (core) is used, AO_UNIPROCESSOR could */
+ /* be defined by the client to avoid unnecessary memory barrier. */
+ AO_INLINE void
+ AO_nop_full(void)
+ {
+ AO_compiler_barrier();
+ }
+# define AO_HAVE_nop_full
+
+#elif defined(AO_ARM_HAVE_DMB)
+ /* ARMv7 is compatible to ARMv6 but has a simpler command for issuing */
+ /* a memory barrier (DMB). Raising it via CP15 should still work */
+ /* (but slightly less efficient because it requires the use of */
+ /* a general-purpose register). */
+ AO_INLINE void
+ AO_nop_full(void)
+ {
+ /* AO_THUMB_GO_ARM is empty. */
+ __asm__ __volatile__("dmb" : : : "memory");
+ }
+# define AO_HAVE_nop_full
+
+ AO_INLINE void
+ AO_nop_write(void)
+ {
+ /* AO_THUMB_GO_ARM is empty. */
+ /* This will target the system domain and thus be overly */
+ /* conservative as the CPUs will occupy the inner shareable domain. */
+ /* The plain variant (dmb st) is theoretically slower, and should */
+ /* not be needed. That said, with limited experimentation, a CPU */
+ /* implementation for which it actually matters has not been found */
+ /* yet, though they should already exist. */
+ /* Anyway, note that the "st" and "ishst" barriers are actually */
+ /* quite weak and, as the libatomic_ops documentation states, */
+ /* usually not what you really want. */
+ __asm__ __volatile__("dmb ishst" : : : "memory");
+ }
+# define AO_HAVE_nop_write
+
+#elif defined(AO_ARM_HAVE_LDREX)
+ /* ARMv6 is the first architecture providing support for a simple */
+ /* LL/SC. A data memory barrier must be raised via CP15 command. */
+ AO_INLINE void
+ AO_nop_full(void)
+ {
+ unsigned dest = 0;
+
+ /* Issue a data memory barrier (keeps ordering of memory */
+ /* transactions before and after this operation). */
+ __asm__ __volatile__("@AO_nop_full\n"
+ AO_THUMB_GO_ARM
+ " mcr p15,0,%0,c7,c10,5\n"
+ AO_THUMB_RESTORE_MODE
+ : "=&r"(dest)
+ : /* empty */
+ : AO_THUMB_SWITCH_CLOBBERS "memory");
+ }
+# define AO_HAVE_nop_full
+
+#else
+ /* AO_nop_full() is emulated using AO_test_and_set_full(). */
+#endif /* !AO_UNIPROCESSOR && !AO_ARM_HAVE_LDREX */
+
+#ifdef AO_ARM_HAVE_LDREX
+
+ /* AO_t/char/short/int load is simple reading. */
+ /* Unaligned accesses are not guaranteed to be atomic. */
+# define AO_ACCESS_CHECK_ALIGNED
+# define AO_ACCESS_short_CHECK_ALIGNED
+# define AO_ACCESS_int_CHECK_ALIGNED
+# include "../all_atomic_only_load.h"
+
+ /* "ARM Architecture Reference Manual" (chapter A3.5.3) says that the */
+ /* single-copy atomic processor accesses are all byte accesses, all */
+ /* halfword accesses to halfword-aligned locations, all word accesses */
+ /* to word-aligned locations. */
+ /* There is only a single concern related to AO store operations: */
+ /* a direct write (by STR[B/H] instruction) will not be recognized */
+ /* by the LL/SC construct on the same CPU (i.e., according to ARM */
+ /* documentation, e.g., see CortexA8 TRM reference, point 8.5, */
+ /* atomic "store" (using LDREX/STREX[B/H]) is the only safe way to */
+ /* set variables also used in LL/SC environment). */
+ /* This is only a problem if interrupt handlers do not clear the */
+ /* reservation (by CLREX instruction or a dummy STREX one), as they */
+ /* almost certainly should (e.g., see restore_user_regs defined in */
+ /* arch/arm/kernel/entry-header.S of Linux. Nonetheless, there is */
+ /* a doubt this was properly implemented in some ancient OS releases. */
+# ifdef AO_BROKEN_TASKSWITCH_CLREX
+ /* Plain atomic store implemented with an LDREX/STREX retry loop so */
+ /* the write participates in the exclusive monitor (needed when the */
+ /* OS does not clear the reservation on context switch -- see the */
+ /* AO_BROKEN_TASKSWITCH_CLREX discussion above). No barrier implied. */
+ AO_INLINE void AO_store(volatile AO_t *addr, AO_t value)
+ {
+ int flag;
+
+ __asm__ __volatile__("@AO_store\n"
+ AO_THUMB_GO_ARM
+ "1: ldrex %0, [%2]\n"
+ " strex %0, %3, [%2]\n"
+ " teq %0, #0\n"
+ " bne 1b\n"
+ AO_THUMB_RESTORE_MODE
+ : "=&r" (flag), "+m" (*addr)
+ : "r" (addr), "r" (value)
+ : AO_THUMB_SWITCH_CLOBBERS "cc");
+ }
+# define AO_HAVE_store
+
+# ifdef AO_ARM_HAVE_LDREXBH
+ /* Byte-wide analogue of AO_store above, using LDREXB/STREXB. */
+ AO_INLINE void AO_char_store(volatile unsigned char *addr,
+ unsigned char value)
+ {
+ int flag;
+
+ __asm__ __volatile__("@AO_char_store\n"
+ AO_THUMB_GO_ARM
+ "1: ldrexb %0, [%2]\n"
+ " strexb %0, %3, [%2]\n"
+ " teq %0, #0\n"
+ " bne 1b\n"
+ AO_THUMB_RESTORE_MODE
+ : "=&r" (flag), "+m" (*addr)
+ : "r" (addr), "r" (value)
+ : AO_THUMB_SWITCH_CLOBBERS "cc");
+ }
+# define AO_HAVE_char_store
+
+ /* Halfword-wide analogue of AO_store above, using LDREXH/STREXH. */
+ AO_INLINE void AO_short_store(volatile unsigned short *addr,
+ unsigned short value)
+ {
+ int flag;
+
+ __asm__ __volatile__("@AO_short_store\n"
+ AO_THUMB_GO_ARM
+ "1: ldrexh %0, [%2]\n"
+ " strexh %0, %3, [%2]\n"
+ " teq %0, #0\n"
+ " bne 1b\n"
+ AO_THUMB_RESTORE_MODE
+ : "=&r" (flag), "+m" (*addr)
+ : "r" (addr), "r" (value)
+ : AO_THUMB_SWITCH_CLOBBERS "cc");
+ }
+# define AO_HAVE_short_store
+# endif /* AO_ARM_HAVE_LDREXBH */
+
+# else
+# include "../loadstore/atomic_store.h"
+ /* AO_int_store is defined in ao_t_is_int.h. */
+# endif /* !AO_BROKEN_TASKSWITCH_CLREX */
+
+# ifndef AO_HAVE_char_store
+# include "../loadstore/char_atomic_store.h"
+# include "../loadstore/short_atomic_store.h"
+# endif
+
+/* NEC LE-IT: replace the SWAP as recommended by ARM:
+ "Applies to: ARM11 Cores
+ Though the SWP instruction will still work with ARM V6 cores, it is
+ recommended to use the new V6 synchronization instructions. The SWP
+ instruction produces 'locked' read and write accesses which are atomic,
+ i.e. another operation cannot be done between these locked accesses which
+ ties up external bus (AHB, AXI) bandwidth and can increase worst case
+ interrupt latencies. LDREX, STREX are more flexible, other instructions
+ can be done between the LDREX and STREX accesses."
+*/
+#ifndef AO_PREFER_GENERALIZED
+#if !defined(AO_FORCE_USE_SWP) || !defined(AO_ARM_HAVE_SWP)
+ /* But, on the other hand, there could be a considerable performance */
+ /* degradation in case of a race. E.g., test_atomic.c executing */
+ /* test_and_set test on a dual-core ARMv7 processor using LDREX/STREX */
+ /* showed around 35 times lower performance than that using SWP. */
+ /* To force use of SWP instruction, use -D AO_FORCE_USE_SWP option */
+ /* (the latter is ignored if SWP instruction is unsupported). */
+ /* Atomically set *addr to 1 via an LDREX/STREX retry loop and */
+ /* return the previous value. No memory barrier is implied. */
+ AO_INLINE AO_TS_VAL_t
+ AO_test_and_set(volatile AO_TS_t *addr)
+ {
+ AO_TS_VAL_t oldval;
+ int flag;
+
+ __asm__ __volatile__("@AO_test_and_set\n"
+ AO_THUMB_GO_ARM
+ "1: ldrex %0, [%3]\n"
+ " strex %1, %4, [%3]\n"
+ " teq %1, #0\n"
+ " bne 1b\n"
+ AO_THUMB_RESTORE_MODE
+ : "=&r"(oldval), "=&r"(flag), "+m"(*addr)
+ : "r"(addr), "r"(1)
+ : AO_THUMB_SWITCH_CLOBBERS "cc");
+ return oldval;
+ }
+# define AO_HAVE_test_and_set
+#endif /* !AO_FORCE_USE_SWP */
+
+/* Atomically add incr to *p (LDREX/STREX retry loop) and return the */
+/* value *p held before the addition. No memory barrier is implied. */
+AO_INLINE AO_t
+AO_fetch_and_add(volatile AO_t *p, AO_t incr)
+{
+ AO_t result, tmp;
+ int flag;
+
+ __asm__ __volatile__("@AO_fetch_and_add\n"
+ AO_THUMB_GO_ARM
+ "1: ldrex %0, [%5]\n" /* get original */
+ " add %2, %0, %4\n" /* sum up in incr */
+ " strex %1, %2, [%5]\n" /* store them */
+ " teq %1, #0\n"
+ " bne 1b\n"
+ AO_THUMB_RESTORE_MODE
+ : "=&r"(result), "=&r"(flag), "=&r"(tmp), "+m"(*p) /* 0..3 */
+ : "r"(incr), "r"(p) /* 4..5 */
+ : AO_THUMB_SWITCH_CLOBBERS "cc");
+ return result;
+}
+#define AO_HAVE_fetch_and_add
+
+/* Atomically increment *p by one and return its previous value. */
+/* No memory barrier is implied. */
+AO_INLINE AO_t
+AO_fetch_and_add1(volatile AO_t *p)
+{
+ AO_t result, tmp;
+ int flag;
+
+ __asm__ __volatile__("@AO_fetch_and_add1\n"
+ AO_THUMB_GO_ARM
+ "1: ldrex %0, [%4]\n" /* get original */
+ " add %1, %0, #1\n" /* increment */
+ " strex %2, %1, [%4]\n" /* store them */
+ " teq %2, #0\n"
+ " bne 1b\n"
+ AO_THUMB_RESTORE_MODE
+ : "=&r"(result), "=&r"(tmp), "=&r"(flag), "+m"(*p)
+ : "r"(p)
+ : AO_THUMB_SWITCH_CLOBBERS "cc");
+ return result;
+}
+#define AO_HAVE_fetch_and_add1
+
+/* Atomically decrement *p by one and return its previous value. */
+/* No memory barrier is implied. */
+AO_INLINE AO_t
+AO_fetch_and_sub1(volatile AO_t *p)
+{
+ AO_t result, tmp;
+ int flag;
+
+ __asm__ __volatile__("@AO_fetch_and_sub1\n"
+ AO_THUMB_GO_ARM
+ "1: ldrex %0, [%4]\n" /* get original */
+ " sub %1, %0, #1\n" /* decrement */
+ " strex %2, %1, [%4]\n" /* store them */
+ " teq %2, #0\n"
+ " bne 1b\n"
+ AO_THUMB_RESTORE_MODE
+ : "=&r"(result), "=&r"(tmp), "=&r"(flag), "+m"(*p)
+ : "r"(p)
+ : AO_THUMB_SWITCH_CLOBBERS "cc");
+ return result;
+}
+#define AO_HAVE_fetch_and_sub1
+
+/* Atomically perform *p &= value (LDREX/STREX retry loop). */
+/* No result is returned and no memory barrier is implied. */
+AO_INLINE void
+AO_and(volatile AO_t *p, AO_t value)
+{
+ AO_t tmp, result;
+
+ __asm__ __volatile__("@AO_and\n"
+ AO_THUMB_GO_ARM
+ "1: ldrex %0, [%4]\n"
+ " and %1, %0, %3\n"
+ " strex %0, %1, [%4]\n"
+ " teq %0, #0\n"
+ " bne 1b\n"
+ AO_THUMB_RESTORE_MODE
+ : "=&r" (tmp), "=&r" (result), "+m" (*p)
+ : "r" (value), "r" (p)
+ : AO_THUMB_SWITCH_CLOBBERS "cc");
+}
+#define AO_HAVE_and
+
+/* Atomically perform *p |= value (LDREX/STREX retry loop). */
+/* No result is returned and no memory barrier is implied. */
+AO_INLINE void
+AO_or(volatile AO_t *p, AO_t value)
+{
+ AO_t tmp, result;
+
+ __asm__ __volatile__("@AO_or\n"
+ AO_THUMB_GO_ARM
+ "1: ldrex %0, [%4]\n"
+ " orr %1, %0, %3\n"
+ " strex %0, %1, [%4]\n"
+ " teq %0, #0\n"
+ " bne 1b\n"
+ AO_THUMB_RESTORE_MODE
+ : "=&r" (tmp), "=&r" (result), "+m" (*p)
+ : "r" (value), "r" (p)
+ : AO_THUMB_SWITCH_CLOBBERS "cc");
+}
+#define AO_HAVE_or
+
+/* Atomically perform *p ^= value (LDREX/STREX retry loop). */
+/* No result is returned and no memory barrier is implied. */
+AO_INLINE void
+AO_xor(volatile AO_t *p, AO_t value)
+{
+ AO_t tmp, result;
+
+ __asm__ __volatile__("@AO_xor\n"
+ AO_THUMB_GO_ARM
+ "1: ldrex %0, [%4]\n"
+ " eor %1, %0, %3\n"
+ " strex %0, %1, [%4]\n"
+ " teq %0, #0\n"
+ " bne 1b\n"
+ AO_THUMB_RESTORE_MODE
+ : "=&r" (tmp), "=&r" (result), "+m" (*p)
+ : "r" (value), "r" (p)
+ : AO_THUMB_SWITCH_CLOBBERS "cc");
+}
+#define AO_HAVE_xor
+#endif /* !AO_PREFER_GENERALIZED */
+
+#ifdef AO_ARM_HAVE_LDREXBH
+ /* Byte-wide fetch-and-add using LDREXB/STREXB; returns the value */
+ /* held at *p before the addition. No memory barrier is implied. */
+ AO_INLINE unsigned char
+ AO_char_fetch_and_add(volatile unsigned char *p, unsigned char incr)
+ {
+ unsigned result, tmp;
+ int flag;
+
+ __asm__ __volatile__("@AO_char_fetch_and_add\n"
+ AO_THUMB_GO_ARM
+ "1: ldrexb %0, [%5]\n"
+ " add %2, %0, %4\n"
+ " strexb %1, %2, [%5]\n"
+ " teq %1, #0\n"
+ " bne 1b\n"
+ AO_THUMB_RESTORE_MODE
+ : "=&r" (result), "=&r" (flag), "=&r" (tmp), "+m" (*p)
+ : "r" ((unsigned)incr), "r" (p)
+ : AO_THUMB_SWITCH_CLOBBERS "cc");
+ return (unsigned char)result;
+ }
+# define AO_HAVE_char_fetch_and_add
+
+ /* Halfword-wide fetch-and-add using LDREXH/STREXH; returns the */
+ /* value held at *p before the addition. No barrier is implied. */
+ AO_INLINE unsigned short
+ AO_short_fetch_and_add(volatile unsigned short *p, unsigned short incr)
+ {
+ unsigned result, tmp;
+ int flag;
+
+ __asm__ __volatile__("@AO_short_fetch_and_add\n"
+ AO_THUMB_GO_ARM
+ "1: ldrexh %0, [%5]\n"
+ " add %2, %0, %4\n"
+ " strexh %1, %2, [%5]\n"
+ " teq %1, #0\n"
+ " bne 1b\n"
+ AO_THUMB_RESTORE_MODE
+ : "=&r" (result), "=&r" (flag), "=&r" (tmp), "+m" (*p)
+ : "r" ((unsigned)incr), "r" (p)
+ : AO_THUMB_SWITCH_CLOBBERS "cc");
+ return (unsigned short)result;
+ }
+# define AO_HAVE_short_fetch_and_add
+#endif /* AO_ARM_HAVE_LDREXBH */
+
+#ifndef AO_GENERALIZE_ASM_BOOL_CAS
+ /* Returns nonzero if the comparison succeeded. */
+ /* Boolean CAS: "result" starts at 2 on each attempt; strexeq only */
+ /* runs on a match and rewrites it to 0 (success) or 1 (lost the */
+ /* reservation, retry). So bit 1 still being set on exit means the */
+ /* comparison failed. No memory barrier is implied. */
+ AO_INLINE int
+ AO_compare_and_swap(volatile AO_t *addr, AO_t old_val, AO_t new_val)
+ {
+ AO_t result, tmp;
+
+ __asm__ __volatile__("@AO_compare_and_swap\n"
+ AO_THUMB_GO_ARM
+ "1: mov %0, #2\n" /* store a flag */
+ " ldrex %1, [%3]\n" /* get original */
+ " teq %1, %4\n" /* see if match */
+# ifdef __thumb2__
+ /* TODO: Eliminate warning: it blocks containing wide Thumb */
+ /* instructions are deprecated in ARMv8. */
+ " it eq\n"
+# endif
+ " strexeq %0, %5, [%3]\n" /* store new one if matched */
+ " teq %0, #1\n"
+ " beq 1b\n" /* if update failed, repeat */
+ AO_THUMB_RESTORE_MODE
+ : "=&r"(result), "=&r"(tmp), "+m"(*addr)
+ : "r"(addr), "r"(old_val), "r"(new_val)
+ : AO_THUMB_SWITCH_CLOBBERS "cc");
+ return !(result&2); /* if succeeded, return 1, else 0 */
+ }
+# define AO_HAVE_compare_and_swap
+#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
+
+/* CAS returning the value actually read from *addr: the store only */
+/* happens if fetched_val == old_val; callers detect success by */
+/* comparing the returned value with old_val. Same flag trick as in */
+/* AO_compare_and_swap. No memory barrier is implied. */
+AO_INLINE AO_t
+AO_fetch_compare_and_swap(volatile AO_t *addr, AO_t old_val, AO_t new_val)
+{
+ AO_t fetched_val;
+ int flag;
+
+ __asm__ __volatile__("@AO_fetch_compare_and_swap\n"
+ AO_THUMB_GO_ARM
+ "1: mov %0, #2\n" /* store a flag */
+ " ldrex %1, [%3]\n" /* get original */
+ " teq %1, %4\n" /* see if match */
+# ifdef __thumb2__
+ " it eq\n"
+# endif
+ " strexeq %0, %5, [%3]\n" /* store new one if matched */
+ " teq %0, #1\n"
+ " beq 1b\n" /* if update failed, repeat */
+ AO_THUMB_RESTORE_MODE
+ : "=&r"(flag), "=&r"(fetched_val), "+m"(*addr)
+ : "r"(addr), "r"(old_val), "r"(new_val)
+ : AO_THUMB_SWITCH_CLOBBERS "cc");
+ return fetched_val;
+}
+#define AO_HAVE_fetch_compare_and_swap
+
+#ifdef AO_ARM_HAVE_LDREXD
+# include "../standard_ao_double_t.h"
+
+ /* "ARM Architecture Reference Manual ARMv7-A/R edition" (chapter */
+ /* A3.5.3) says that memory accesses caused by LDREXD and STREXD */
+ /* instructions to doubleword-aligned locations are single-copy */
+ /* atomic; accesses to 64-bit elements by other instructions might */
+ /* not be single-copy atomic as they are executed as a sequence of */
+ /* 32-bit accesses. */
+ /* Single-copy-atomic 64-bit load via LDREXD (requires the location */
+ /* to be doubleword-aligned, per the manual excerpt above). */
+ AO_INLINE AO_double_t
+ AO_double_load(const volatile AO_double_t *addr)
+ {
+ AO_double_t result;
+
+ /* AO_THUMB_GO_ARM is empty. */
+ __asm__ __volatile__("@AO_double_load\n"
+ " ldrexd %0, %H0, [%1]"
+ : "=&r" (result.AO_whole)
+ : "r" (addr)
+ /* : no clobber */);
+ return result;
+ }
+# define AO_HAVE_double_load
+
+ /* Single-copy-atomic 64-bit store: LDREXD/STREXD pair repeated (in */
+ /* a C loop around the asm) until the store-conditional succeeds. */
+ AO_INLINE void
+ AO_double_store(volatile AO_double_t *addr, AO_double_t new_val)
+ {
+ AO_double_t old_val;
+ int status;
+
+ do {
+ /* AO_THUMB_GO_ARM is empty. */
+ __asm__ __volatile__("@AO_double_store\n"
+ " ldrexd %0, %H0, [%3]\n"
+ " strexd %1, %4, %H4, [%3]"
+ : "=&r" (old_val.AO_whole), "=&r" (status), "+m" (*addr)
+ : "r" (addr), "r" (new_val.AO_whole)
+ : "cc");
+ } while (AO_EXPECT_FALSE(status));
+ }
+# define AO_HAVE_double_store
+
+ /* Boolean 64-bit CAS; returns nonzero iff the swap was performed. */
+ /* NOTE(review): the LDREXD and STREXD live in two separate asm */
+ /* statements, so the exclusive reservation is held across */
+ /* compiler-generated code (the comparison below); any memory access */
+ /* the compiler emits there may clear the reservation and force a */
+ /* retry -- believed safe (it only loops), but worth confirming. */
+ AO_INLINE int
+ AO_double_compare_and_swap(volatile AO_double_t *addr,
+ AO_double_t old_val, AO_double_t new_val)
+ {
+ double_ptr_storage tmp;
+ int result = 1;
+
+ do {
+ /* AO_THUMB_GO_ARM is empty. */
+ __asm__ __volatile__("@AO_double_compare_and_swap\n"
+ " ldrexd %0, %H0, [%1]\n" /* get original to r1 & r2 */
+ : "=&r"(tmp)
+ : "r"(addr)
+ /* : no clobber */);
+ if (tmp != old_val.AO_whole)
+ break; /* mismatch: result is still 1, so 0 is returned */
+ __asm__ __volatile__(
+ " strexd %0, %3, %H3, [%2]\n" /* store new one if matched */
+ : "=&r"(result), "+m"(*addr)
+ : "r" (addr), "r" (new_val.AO_whole)
+ : "cc");
+ } while (AO_EXPECT_FALSE(result));
+ return !result; /* if succeeded, return 1 else 0 */
+ }
+# define AO_HAVE_double_compare_and_swap
+# define AO_HAVE_double_compare_and_swap
+#endif /* AO_ARM_HAVE_LDREXD */
+
+#else
+/* pre ARMv6 architectures ... */
+
+/* I found a slide set that, if I read it correctly, claims that */
+/* Loads followed by either a Load or Store are ordered, but nothing */
+/* else is. */
+/* It appears that SWP is the only simple memory barrier. */
+#include "../all_aligned_atomic_load_store.h"
+
+/* The code should run correctly on a multi-core ARMv6+ as well. */
+
+#endif /* !AO_ARM_HAVE_LDREX */
+
+#if !defined(AO_HAVE_test_and_set_full) && !defined(AO_HAVE_test_and_set) \
+ && defined (AO_ARM_HAVE_SWP) && (!defined(AO_PREFER_GENERALIZED) \
+ || !defined(AO_HAVE_fetch_compare_and_swap))
+ /* Pre-ARMv6 (or forced) test-and-set using the locked SWP */
+ /* instruction; returns the previous value of *addr. The "full" */
+ /* ordering comes from SWP's locked bus access plus the "memory" */
+ /* clobber. */
+ AO_INLINE AO_TS_VAL_t
+ AO_test_and_set_full(volatile AO_TS_t *addr)
+ {
+ AO_TS_VAL_t oldval;
+ /* SWP on ARM is very similar to XCHG on x86. */
+ /* The first operand is the result, the second the value */
+ /* to be stored. Both registers must be different from addr. */
+ /* Make the address operand an early clobber output so it */
+ /* doesn't overlap with the other operands. The early clobber */
+ /* on oldval is necessary to prevent the compiler allocating */
+ /* them to the same register if they are both unused. */
+
+ __asm__ __volatile__("@AO_test_and_set_full\n"
+ AO_THUMB_GO_ARM
+ " swp %0, %2, [%3]\n"
+ /* Ignore GCC "SWP is deprecated for this architecture" */
+ /* warning here (for ARMv6+). */
+ AO_THUMB_RESTORE_MODE
+ : "=&r"(oldval), "=&r"(addr)
+ : "r"(1), "1"(addr)
+ : AO_THUMB_SWITCH_CLOBBERS "memory");
+ return oldval;
+ }
+# define AO_HAVE_test_and_set_full
+#endif /* !AO_HAVE_test_and_set[_full] && AO_ARM_HAVE_SWP */
+
+#define AO_T_IS_INT
--- /dev/null
+/*
+ * Copyright (C) 2009 Bradley Smith <brad@brad-smith.co.uk>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "../all_atomic_load_store.h"
+
+#include "../ordered.h" /* There are no multiprocessor implementations. */
+
+#include "../test_and_set_t_is_ao_t.h"
+
+#ifndef AO_PREFER_GENERALIZED
+ /* Test-and-set via the AVR32 "xchg" instruction: atomically swaps */
+ /* the register value 1 into *addr and returns the previous value. */
+ AO_INLINE AO_TS_VAL_t
+ AO_test_and_set_full(volatile AO_TS_t *addr)
+ {
+ register long ret;
+
+ __asm__ __volatile__(
+ "xchg %[oldval], %[mem], %[newval]"
+ : [oldval] "=&r"(ret)
+ : [mem] "r"(addr), [newval] "r"(1)
+ : "memory");
+
+ return (AO_TS_VAL_t)ret;
+ }
+# define AO_HAVE_test_and_set_full
+#endif /* !AO_PREFER_GENERALIZED */
+
+/* Atomically: if *addr == old then *addr = new_val. Returns nonzero */
+/* iff the swap was performed, matching the contract of the other */
+/* AO_compare_and_swap implementations. Memory ordering relies on */
+/* the inclusion of ordered.h above (no multiprocessor AVR32). */
+AO_INLINE int
+AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val)
+{
+ register long ret;
+
+ __asm__ __volatile__(
+ "1: ssrf 5\n"
+ " ld.w %[res], %[mem]\n"
+ " eor %[res], %[oldval]\n"
+ " brne 2f\n"
+ " stcond %[mem], %[newval]\n"
+ " brne 1b\n" /* stcond failed: retry from the ssrf */
+ "2:\n"
+ : [res] "=&r"(ret), [mem] "=m"(*addr)
+ : "m"(*addr), [newval] "r"(new_val), [oldval] "r"(old)
+ : "cc", "memory");
+
+ /* "eor" leaves 0 in ret exactly when *addr matched old (and the */
+ /* store-conditional then completed), so the result must be */
+ /* inverted: returning raw ret would report success on mismatch */
+ /* and failure on a successful swap. */
+ return (int)(ret == 0);
+}
+#define AO_HAVE_compare_and_swap_full
+
+/* TODO: implement AO_fetch_compare_and_swap. */
+
+#define AO_T_IS_INT
--- /dev/null
+/*
+ * Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* FIXME: seems to be untested. */
+
+#include "../all_atomic_load_store.h"
+
+#include "../ordered.h" /* There are no multiprocessor implementations. */
+
+#include "../test_and_set_t_is_ao_t.h"
+
+/*
+ * The architecture apparently supports an "f" flag which is
+ * set on preemption. This essentially gives us load-locked,
+ * store-conditional primitives, though I'm not quite sure how
+ * this would work on a hypothetical multiprocessor. -HB
+ *
+ * For details, see
+ * http://developer.axis.com/doc/hardware/etrax100lx/prog_man/
+ * 1_architectural_description.pdf
+ *
+ * Presumably many other primitives (notably CAS, including the double-
+ * width versions) could be implemented in this manner, if someone got
+ * around to it.
+ */
+
+/* Test-and-set using the CRIS "ax" (atomic-extend) prefix: reads the */
+/* old byte at *addr, writes 1, and retries ("bwf 0b") if the F flag */
+/* reports the sequence was interrupted. Returns the previous value. */
+AO_INLINE AO_TS_VAL_t
+AO_test_and_set_full(volatile AO_TS_t *addr) {
+ /* Ripped from linuxthreads/sysdeps/cris/pt-machine.h */
+ register unsigned long int ret;
+
+ /* Note the use of a dummy output of *addr to expose the write. The
+ memory barrier is to stop *other* writes being moved past this code. */
+ __asm__ __volatile__("clearf\n"
+ "0:\n\t"
+ "movu.b [%2],%0\n\t"
+ "ax\n\t"
+ "move.b %3,[%2]\n\t"
+ "bwf 0b\n\t"
+ "clearf"
+ : "=&r" (ret), "=m" (*addr)
+ : "r" (addr), "r" ((int) 1), "m" (*addr)
+ : "memory");
+ return ret;
+}
+#define AO_HAVE_test_and_set_full
--- /dev/null
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+/* NOTE(review): this chunk appears to be generated from a template */
+/* (the empty comment splitting "unsigned" and the type name is the */
+/* residue of the substitution) -- prefer editing the template. */
+
+/* Atomically add incr to *addr with relaxed ordering; returns the */
+/* value held before the addition. */
+AO_INLINE unsigned/**/char
+AO_char_fetch_and_add(volatile unsigned/**/char *addr, unsigned/**/char incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_char_fetch_and_add
+
+/* Atomically perform *addr &= value (relaxed; result discarded). */
+AO_INLINE void
+AO_char_and(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_char_and
+
+/* Atomically perform *addr |= value (relaxed; result discarded). */
+AO_INLINE void
+AO_char_or(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_char_or
+
+/* Atomically perform *addr ^= value (relaxed; result discarded). */
+AO_INLINE void
+AO_char_xor(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_char_xor
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+/* Atomically add incr to *addr with relaxed ordering; returns the */
+/* value held before the addition. (Template-generated, like the */
+/* sibling char/int/AO_t chunks.) */
+AO_INLINE unsigned/**/short
+AO_short_fetch_and_add(volatile unsigned/**/short *addr, unsigned/**/short incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_short_fetch_and_add
+
+/* Atomically perform *addr &= value (relaxed; result discarded). */
+AO_INLINE void
+AO_short_and(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_short_and
+
+/* Atomically perform *addr |= value (relaxed; result discarded). */
+AO_INLINE void
+AO_short_or(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_short_or
+
+/* Atomically perform *addr ^= value (relaxed; result discarded). */
+AO_INLINE void
+AO_short_xor(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_short_xor
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+/* Atomically add incr to *addr with relaxed ordering; returns the */
+/* value held before the addition. */
+AO_INLINE unsigned
+AO_int_fetch_and_add(volatile unsigned *addr, unsigned incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_int_fetch_and_add
+
+/* Atomically perform *addr &= value (relaxed; result discarded). */
+AO_INLINE void
+AO_int_and(volatile unsigned *addr, unsigned value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_int_and
+
+/* Atomically perform *addr |= value (relaxed; result discarded). */
+AO_INLINE void
+AO_int_or(volatile unsigned *addr, unsigned value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_int_or
+
+/* Atomically perform *addr ^= value (relaxed; result discarded). */
+AO_INLINE void
+AO_int_xor(volatile unsigned *addr, unsigned value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_int_xor
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+/* Atomically add incr to *addr with relaxed ordering; returns the */
+/* value held before the addition. */
+AO_INLINE AO_t
+AO_fetch_and_add(volatile AO_t *addr, AO_t incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_fetch_and_add
+
+/* Atomically perform *addr &= value (relaxed; result discarded). */
+AO_INLINE void
+AO_and(volatile AO_t *addr, AO_t value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_and
+
+/* Atomically perform *addr |= value (relaxed; result discarded). */
+AO_INLINE void
+AO_or(volatile AO_t *addr, AO_t value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_or
+
+/* Atomically perform *addr ^= value (relaxed; result discarded). */
+AO_INLINE void
+AO_xor(volatile AO_t *addr, AO_t value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_xor
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+/* Acquire-ordered variants of the relaxed char operations above. */
+
+/* Atomically add incr to *addr (acquire); returns the old value. */
+AO_INLINE unsigned/**/char
+AO_char_fetch_and_add_acquire(volatile unsigned/**/char *addr, unsigned/**/char incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_char_fetch_and_add_acquire
+
+/* Atomically perform *addr &= value (acquire; result discarded). */
+AO_INLINE void
+AO_char_and_acquire(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_char_and_acquire
+
+/* Atomically perform *addr |= value (acquire; result discarded). */
+AO_INLINE void
+AO_char_or_acquire(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_char_or_acquire
+
+/* Atomically perform *addr ^= value (acquire; result discarded). */
+AO_INLINE void
+AO_char_xor_acquire(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_char_xor_acquire
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+/* Acquire-ordered variants of the relaxed short operations above. */
+
+/* Atomically add incr to *addr (acquire); returns the old value. */
+AO_INLINE unsigned/**/short
+AO_short_fetch_and_add_acquire(volatile unsigned/**/short *addr, unsigned/**/short incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_short_fetch_and_add_acquire
+
+/* Atomically perform *addr &= value (acquire; result discarded). */
+AO_INLINE void
+AO_short_and_acquire(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_short_and_acquire
+
+/* Atomically perform *addr |= value (acquire; result discarded). */
+AO_INLINE void
+AO_short_or_acquire(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_short_or_acquire
+
+/* Atomically perform *addr ^= value (acquire; result discarded). */
+AO_INLINE void
+AO_short_xor_acquire(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_short_xor_acquire
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned
+AO_int_fetch_and_add_acquire(volatile unsigned *addr, unsigned incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_ACQUIRE); /* returns the old value */
+}
+#define AO_HAVE_int_fetch_and_add_acquire
+
+AO_INLINE void
+AO_int_and_acquire(volatile unsigned *addr, unsigned value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_ACQUIRE); /* updated value discarded */
+}
+#define AO_HAVE_int_and_acquire
+
+AO_INLINE void
+AO_int_or_acquire(volatile unsigned *addr, unsigned value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_ACQUIRE); /* updated value discarded */
+}
+#define AO_HAVE_int_or_acquire
+
+AO_INLINE void
+AO_int_xor_acquire(volatile unsigned *addr, unsigned value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_ACQUIRE); /* updated value discarded */
+}
+#define AO_HAVE_int_xor_acquire
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE AO_t
+AO_fetch_and_add_acquire(volatile AO_t *addr, AO_t incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_ACQUIRE); /* returns the old value */
+}
+#define AO_HAVE_fetch_and_add_acquire
+
+AO_INLINE void
+AO_and_acquire(volatile AO_t *addr, AO_t value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_ACQUIRE); /* updated value discarded */
+}
+#define AO_HAVE_and_acquire
+
+AO_INLINE void
+AO_or_acquire(volatile AO_t *addr, AO_t value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_ACQUIRE); /* updated value discarded */
+}
+#define AO_HAVE_or_acquire
+
+AO_INLINE void
+AO_xor_acquire(volatile AO_t *addr, AO_t value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_ACQUIRE); /* updated value discarded */
+}
+#define AO_HAVE_xor_acquire
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned/**/char
+AO_char_fetch_and_add_release(volatile unsigned/**/char *addr, unsigned/**/char incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_RELEASE); /* returns the old value */
+}
+#define AO_HAVE_char_fetch_and_add_release
+
+AO_INLINE void
+AO_char_and_release(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_RELEASE); /* updated value discarded */
+}
+#define AO_HAVE_char_and_release
+
+AO_INLINE void
+AO_char_or_release(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_RELEASE); /* updated value discarded */
+}
+#define AO_HAVE_char_or_release
+
+AO_INLINE void
+AO_char_xor_release(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_RELEASE); /* updated value discarded */
+}
+#define AO_HAVE_char_xor_release
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned/**/short
+AO_short_fetch_and_add_release(volatile unsigned/**/short *addr, unsigned/**/short incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_RELEASE); /* returns the old value */
+}
+#define AO_HAVE_short_fetch_and_add_release
+
+AO_INLINE void
+AO_short_and_release(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_RELEASE); /* updated value discarded */
+}
+#define AO_HAVE_short_and_release
+
+AO_INLINE void
+AO_short_or_release(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_RELEASE); /* updated value discarded */
+}
+#define AO_HAVE_short_or_release
+
+AO_INLINE void
+AO_short_xor_release(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_RELEASE); /* updated value discarded */
+}
+#define AO_HAVE_short_xor_release
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned
+AO_int_fetch_and_add_release(volatile unsigned *addr, unsigned incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_RELEASE); /* returns the old value */
+}
+#define AO_HAVE_int_fetch_and_add_release
+
+AO_INLINE void
+AO_int_and_release(volatile unsigned *addr, unsigned value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_RELEASE); /* updated value discarded */
+}
+#define AO_HAVE_int_and_release
+
+AO_INLINE void
+AO_int_or_release(volatile unsigned *addr, unsigned value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_RELEASE); /* updated value discarded */
+}
+#define AO_HAVE_int_or_release
+
+AO_INLINE void
+AO_int_xor_release(volatile unsigned *addr, unsigned value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_RELEASE); /* updated value discarded */
+}
+#define AO_HAVE_int_xor_release
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE AO_t
+AO_fetch_and_add_release(volatile AO_t *addr, AO_t incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_RELEASE); /* returns the old value */
+}
+#define AO_HAVE_fetch_and_add_release
+
+AO_INLINE void
+AO_and_release(volatile AO_t *addr, AO_t value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_RELEASE); /* updated value discarded */
+}
+#define AO_HAVE_and_release
+
+AO_INLINE void
+AO_or_release(volatile AO_t *addr, AO_t value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_RELEASE); /* updated value discarded */
+}
+#define AO_HAVE_or_release
+
+AO_INLINE void
+AO_xor_release(volatile AO_t *addr, AO_t value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_RELEASE); /* updated value discarded */
+}
+#define AO_HAVE_xor_release
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned/**/char
+AO_char_fetch_and_add_full(volatile unsigned/**/char *addr, unsigned/**/char incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_SEQ_CST); /* returns the old value */
+}
+#define AO_HAVE_char_fetch_and_add_full
+
+AO_INLINE void
+AO_char_and_full(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_SEQ_CST); /* updated value discarded */
+}
+#define AO_HAVE_char_and_full
+
+AO_INLINE void
+AO_char_or_full(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_SEQ_CST); /* updated value discarded */
+}
+#define AO_HAVE_char_or_full
+
+AO_INLINE void
+AO_char_xor_full(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_SEQ_CST); /* updated value discarded */
+}
+#define AO_HAVE_char_xor_full
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned/**/short
+AO_short_fetch_and_add_full(volatile unsigned/**/short *addr, unsigned/**/short incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_SEQ_CST); /* returns the old value */
+}
+#define AO_HAVE_short_fetch_and_add_full
+
+AO_INLINE void
+AO_short_and_full(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_SEQ_CST); /* updated value discarded */
+}
+#define AO_HAVE_short_and_full
+
+AO_INLINE void
+AO_short_or_full(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_SEQ_CST); /* updated value discarded */
+}
+#define AO_HAVE_short_or_full
+
+AO_INLINE void
+AO_short_xor_full(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_SEQ_CST); /* updated value discarded */
+}
+#define AO_HAVE_short_xor_full
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned
+AO_int_fetch_and_add_full(volatile unsigned *addr, unsigned incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_SEQ_CST); /* returns the old value */
+}
+#define AO_HAVE_int_fetch_and_add_full
+
+AO_INLINE void
+AO_int_and_full(volatile unsigned *addr, unsigned value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_SEQ_CST); /* updated value discarded */
+}
+#define AO_HAVE_int_and_full
+
+AO_INLINE void
+AO_int_or_full(volatile unsigned *addr, unsigned value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_SEQ_CST); /* updated value discarded */
+}
+#define AO_HAVE_int_or_full
+
+AO_INLINE void
+AO_int_xor_full(volatile unsigned *addr, unsigned value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_SEQ_CST); /* updated value discarded */
+}
+#define AO_HAVE_int_xor_full
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE AO_t
+AO_fetch_and_add_full(volatile AO_t *addr, AO_t incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_SEQ_CST); /* returns the old value */
+}
+#define AO_HAVE_fetch_and_add_full
+
+AO_INLINE void
+AO_and_full(volatile AO_t *addr, AO_t value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_SEQ_CST); /* updated value discarded */
+}
+#define AO_HAVE_and_full
+
+AO_INLINE void
+AO_or_full(volatile AO_t *addr, AO_t value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_SEQ_CST); /* updated value discarded */
+}
+#define AO_HAVE_or_full
+
+AO_INLINE void
+AO_xor_full(volatile AO_t *addr, AO_t value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_SEQ_CST); /* updated value discarded */
+}
+#define AO_HAVE_xor_full
--- /dev/null
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE XCTYPE /* template: uppercase X-prefixed tokens are substituted by the generator */
+AO_XSIZE_fetch_and_add_XBAR(volatile XCTYPE *addr, XCTYPE incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_XGCCBAR); /* returns the old value */
+}
+#define AO_HAVE_XSIZE_fetch_and_add_XBAR
+
+AO_INLINE void
+AO_XSIZE_and_XBAR(volatile XCTYPE *addr, XCTYPE value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_XGCCBAR); /* updated value discarded */
+}
+#define AO_HAVE_XSIZE_and_XBAR
+
+AO_INLINE void
+AO_XSIZE_or_XBAR(volatile XCTYPE *addr, XCTYPE value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_XGCCBAR); /* updated value discarded */
+}
+#define AO_HAVE_XSIZE_or_XBAR
+
+AO_INLINE void
+AO_XSIZE_xor_XBAR(volatile XCTYPE *addr, XCTYPE value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_XGCCBAR); /* updated value discarded */
+}
+#define AO_HAVE_XSIZE_xor_XBAR
--- /dev/null
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned/**/char
+AO_char_load(const volatile unsigned/**/char *addr)
+{
+ return __atomic_load_n(addr, __ATOMIC_RELAXED); /* atomic load, no ordering */
+}
+#define AO_HAVE_char_load
+
+AO_INLINE unsigned/**/char
+AO_char_load_acquire(const volatile unsigned/**/char *addr)
+{
+ return __atomic_load_n(addr, __ATOMIC_ACQUIRE); /* atomic load with acquire ordering */
+}
+#define AO_HAVE_char_load_acquire
+
+/* char_load_full is generalized using load and nop_full, so that */
+/* char_load_read is defined using load and nop_read. */
+/* char_store_full definition is omitted similar to load_full reason. */
+
+AO_INLINE void
+AO_char_store(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ __atomic_store_n(addr, value, __ATOMIC_RELAXED); /* atomic store, no ordering */
+}
+#define AO_HAVE_char_store
+
+AO_INLINE void
+AO_char_store_release(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ __atomic_store_n(addr, value, __ATOMIC_RELEASE); /* atomic store with release ordering */
+}
+#define AO_HAVE_char_store_release
+
+AO_INLINE unsigned/**/char
+AO_char_fetch_compare_and_swap(volatile unsigned/**/char *addr,
+ unsigned/**/char old_val, unsigned/**/char new_val)
+{
+ return __sync_val_compare_and_swap(addr, old_val, new_val
+ /* empty protection list */); /* returns prior value; equals old_val on success */
+}
+#define AO_HAVE_char_fetch_compare_and_swap
+
+/* TODO: Add CAS _acquire/release/full primitives. */
+
+#ifndef AO_GENERALIZE_ASM_BOOL_CAS
+ AO_INLINE int
+ AO_char_compare_and_swap(volatile unsigned/**/char *addr,
+ unsigned/**/char old_val, unsigned/**/char new_val)
+ {
+ return __sync_bool_compare_and_swap(addr, old_val, new_val
+ /* empty protection list */); /* nonzero iff the swap took place */
+ }
+# define AO_HAVE_char_compare_and_swap
+#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned/**/short
+AO_short_load(const volatile unsigned/**/short *addr)
+{
+ return __atomic_load_n(addr, __ATOMIC_RELAXED); /* atomic load, no ordering */
+}
+#define AO_HAVE_short_load
+
+AO_INLINE unsigned/**/short
+AO_short_load_acquire(const volatile unsigned/**/short *addr)
+{
+ return __atomic_load_n(addr, __ATOMIC_ACQUIRE); /* atomic load with acquire ordering */
+}
+#define AO_HAVE_short_load_acquire
+
+/* short_load_full is generalized using load and nop_full, so that */
+/* short_load_read is defined using load and nop_read. */
+/* short_store_full definition is omitted similar to load_full reason. */
+
+AO_INLINE void
+AO_short_store(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ __atomic_store_n(addr, value, __ATOMIC_RELAXED); /* atomic store, no ordering */
+}
+#define AO_HAVE_short_store
+
+AO_INLINE void
+AO_short_store_release(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ __atomic_store_n(addr, value, __ATOMIC_RELEASE); /* atomic store with release ordering */
+}
+#define AO_HAVE_short_store_release
+
+AO_INLINE unsigned/**/short
+AO_short_fetch_compare_and_swap(volatile unsigned/**/short *addr,
+ unsigned/**/short old_val, unsigned/**/short new_val)
+{
+ return __sync_val_compare_and_swap(addr, old_val, new_val
+ /* empty protection list */); /* returns prior value; equals old_val on success */
+}
+#define AO_HAVE_short_fetch_compare_and_swap
+
+/* TODO: Add CAS _acquire/release/full primitives. */
+
+#ifndef AO_GENERALIZE_ASM_BOOL_CAS
+ AO_INLINE int
+ AO_short_compare_and_swap(volatile unsigned/**/short *addr,
+ unsigned/**/short old_val, unsigned/**/short new_val)
+ {
+ return __sync_bool_compare_and_swap(addr, old_val, new_val
+ /* empty protection list */); /* nonzero iff the swap took place */
+ }
+# define AO_HAVE_short_compare_and_swap
+#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned
+AO_int_load(const volatile unsigned *addr)
+{
+ return __atomic_load_n(addr, __ATOMIC_RELAXED); /* atomic load, no ordering */
+}
+#define AO_HAVE_int_load
+
+AO_INLINE unsigned
+AO_int_load_acquire(const volatile unsigned *addr)
+{
+ return __atomic_load_n(addr, __ATOMIC_ACQUIRE); /* atomic load with acquire ordering */
+}
+#define AO_HAVE_int_load_acquire
+
+/* int_load_full is generalized using load and nop_full, so that */
+/* int_load_read is defined using load and nop_read. */
+/* int_store_full definition is omitted similar to load_full reason. */
+
+AO_INLINE void
+AO_int_store(volatile unsigned *addr, unsigned value)
+{
+ __atomic_store_n(addr, value, __ATOMIC_RELAXED); /* atomic store, no ordering */
+}
+#define AO_HAVE_int_store
+
+AO_INLINE void
+AO_int_store_release(volatile unsigned *addr, unsigned value)
+{
+ __atomic_store_n(addr, value, __ATOMIC_RELEASE); /* atomic store with release ordering */
+}
+#define AO_HAVE_int_store_release
+
+AO_INLINE unsigned
+AO_int_fetch_compare_and_swap(volatile unsigned *addr,
+ unsigned old_val, unsigned new_val)
+{
+ return __sync_val_compare_and_swap(addr, old_val, new_val
+ /* empty protection list */); /* returns prior value; equals old_val on success */
+}
+#define AO_HAVE_int_fetch_compare_and_swap
+
+/* TODO: Add CAS _acquire/release/full primitives. */
+
+#ifndef AO_GENERALIZE_ASM_BOOL_CAS
+ AO_INLINE int
+ AO_int_compare_and_swap(volatile unsigned *addr,
+ unsigned old_val, unsigned new_val)
+ {
+ return __sync_bool_compare_and_swap(addr, old_val, new_val
+ /* empty protection list */); /* nonzero iff the swap took place */
+ }
+# define AO_HAVE_int_compare_and_swap
+#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE AO_t
+AO_load(const volatile AO_t *addr)
+{
+ return __atomic_load_n(addr, __ATOMIC_RELAXED); /* atomic load, no ordering */
+}
+#define AO_HAVE_load
+
+AO_INLINE AO_t
+AO_load_acquire(const volatile AO_t *addr)
+{
+ return __atomic_load_n(addr, __ATOMIC_ACQUIRE); /* atomic load with acquire ordering */
+}
+#define AO_HAVE_load_acquire
+
+/* load_full is generalized using load and nop_full, so that */
+/* load_read is defined using load and nop_read. */
+/* store_full definition is omitted similar to load_full reason. */
+
+AO_INLINE void
+AO_store(volatile AO_t *addr, AO_t value)
+{
+ __atomic_store_n(addr, value, __ATOMIC_RELAXED); /* atomic store, no ordering */
+}
+#define AO_HAVE_store
+
+AO_INLINE void
+AO_store_release(volatile AO_t *addr, AO_t value)
+{
+ __atomic_store_n(addr, value, __ATOMIC_RELEASE); /* atomic store with release ordering */
+}
+#define AO_HAVE_store_release
+
+AO_INLINE AO_t
+AO_fetch_compare_and_swap(volatile AO_t *addr,
+ AO_t old_val, AO_t new_val)
+{
+ return __sync_val_compare_and_swap(addr, old_val, new_val
+ /* empty protection list */); /* returns prior value; equals old_val on success */
+}
+#define AO_HAVE_fetch_compare_and_swap
+
+/* TODO: Add CAS _acquire/release/full primitives. */
+
+#ifndef AO_GENERALIZE_ASM_BOOL_CAS
+ AO_INLINE int
+ AO_compare_and_swap(volatile AO_t *addr,
+ AO_t old_val, AO_t new_val)
+ {
+ return __sync_bool_compare_and_swap(addr, old_val, new_val
+ /* empty protection list */); /* nonzero iff the swap took place */
+ }
+# define AO_HAVE_compare_and_swap
+#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
--- /dev/null
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE XCTYPE /* template: uppercase X-prefixed tokens are substituted by the generator */
+AO_XSIZE_load(const volatile XCTYPE *addr)
+{
+ return __atomic_load_n(addr, __ATOMIC_RELAXED); /* atomic load, no ordering */
+}
+#define AO_HAVE_XSIZE_load
+
+AO_INLINE XCTYPE
+AO_XSIZE_load_acquire(const volatile XCTYPE *addr)
+{
+ return __atomic_load_n(addr, __ATOMIC_ACQUIRE); /* atomic load with acquire ordering */
+}
+#define AO_HAVE_XSIZE_load_acquire
+
+/* XSIZE_load_full is generalized using load and nop_full, so that */
+/* XSIZE_load_read is defined using load and nop_read. */
+/* XSIZE_store_full definition is omitted similar to load_full reason. */
+
+AO_INLINE void
+AO_XSIZE_store(volatile XCTYPE *addr, XCTYPE value)
+{
+ __atomic_store_n(addr, value, __ATOMIC_RELAXED); /* atomic store, no ordering */
+}
+#define AO_HAVE_XSIZE_store
+
+AO_INLINE void
+AO_XSIZE_store_release(volatile XCTYPE *addr, XCTYPE value)
+{
+ __atomic_store_n(addr, value, __ATOMIC_RELEASE); /* atomic store with release ordering */
+}
+#define AO_HAVE_XSIZE_store_release
+
+AO_INLINE XCTYPE
+AO_XSIZE_fetch_compare_and_swap(volatile XCTYPE *addr,
+ XCTYPE old_val, XCTYPE new_val)
+{
+ return __sync_val_compare_and_swap(addr, old_val, new_val
+ /* empty protection list */); /* returns prior value; equals old_val on success */
+}
+#define AO_HAVE_XSIZE_fetch_compare_and_swap
+
+/* TODO: Add CAS _acquire/release/full primitives. */
+
+#ifndef AO_GENERALIZE_ASM_BOOL_CAS
+ AO_INLINE int
+ AO_XSIZE_compare_and_swap(volatile XCTYPE *addr,
+ XCTYPE old_val, XCTYPE new_val)
+ {
+ return __sync_bool_compare_and_swap(addr, old_val, new_val
+ /* empty protection list */); /* nonzero iff the swap took place */
+ }
+# define AO_HAVE_XSIZE_compare_and_swap
+#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
--- /dev/null
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+/* The following implementation assumes GCC 4.7 or later. */
+/* For the details, see GNU Manual, chapter 6.52 (Built-in functions */
+/* for memory model aware atomic operations). */
+
+/* TODO: Include this file for other targets if gcc 4.7+ */
+
+#ifdef AO_UNIPROCESSOR
+ /* If only a single processor (core) is used, AO_UNIPROCESSOR could */
+ /* be defined by the client to avoid unnecessary memory barrier. */
+ AO_INLINE void
+ AO_nop_full(void)
+ {
+ AO_compiler_barrier(); /* no HW fence needed on a uniprocessor */
+ }
+# define AO_HAVE_nop_full
+
+#else
+ AO_INLINE void
+ AO_nop_read(void)
+ {
+ __atomic_thread_fence(__ATOMIC_ACQUIRE); /* read (acquire) fence */
+ }
+# define AO_HAVE_nop_read
+
+# ifndef AO_HAVE_nop_write
+ AO_INLINE void
+ AO_nop_write(void)
+ {
+ __atomic_thread_fence(__ATOMIC_RELEASE); /* write (release) fence */
+ }
+# define AO_HAVE_nop_write
+# endif
+
+ AO_INLINE void
+ AO_nop_full(void)
+ {
+ /* __sync_synchronize() could be used instead. */
+ __atomic_thread_fence(__ATOMIC_SEQ_CST); /* full fence */
+ }
+# define AO_HAVE_nop_full
+#endif /* !AO_UNIPROCESSOR */
+
+#include "generic-small.h"
+
+#ifndef AO_PREFER_GENERALIZED
+# include "generic-arithm.h"
+
+ AO_INLINE AO_TS_VAL_t
+ AO_test_and_set(volatile AO_TS_t *addr)
+ {
+ return (AO_TS_VAL_t)__atomic_test_and_set(addr, __ATOMIC_RELAXED); /* prior state of the flag */
+ }
+# define AO_HAVE_test_and_set
+
+ AO_INLINE AO_TS_VAL_t
+ AO_test_and_set_acquire(volatile AO_TS_t *addr)
+ {
+ return (AO_TS_VAL_t)__atomic_test_and_set(addr, __ATOMIC_ACQUIRE); /* prior state of the flag */
+ }
+# define AO_HAVE_test_and_set_acquire
+
+ AO_INLINE AO_TS_VAL_t
+ AO_test_and_set_release(volatile AO_TS_t *addr)
+ {
+ return (AO_TS_VAL_t)__atomic_test_and_set(addr, __ATOMIC_RELEASE); /* prior state of the flag */
+ }
+# define AO_HAVE_test_and_set_release
+
+ AO_INLINE AO_TS_VAL_t
+ AO_test_and_set_full(volatile AO_TS_t *addr)
+ {
+ return (AO_TS_VAL_t)__atomic_test_and_set(addr, __ATOMIC_SEQ_CST); /* prior state of the flag */
+ }
+# define AO_HAVE_test_and_set_full
+#endif /* !AO_PREFER_GENERALIZED */
+
+#ifdef AO_HAVE_DOUBLE_PTR_STORAGE
+
+# ifndef AO_HAVE_double_load
+ AO_INLINE AO_double_t
+ AO_double_load(const volatile AO_double_t *addr)
+ {
+ AO_double_t result;
+
+ result.AO_whole = __atomic_load_n(&addr->AO_whole, __ATOMIC_RELAXED); /* single double-wide load */
+ return result;
+ }
+# define AO_HAVE_double_load
+# endif
+
+# ifndef AO_HAVE_double_load_acquire
+ AO_INLINE AO_double_t
+ AO_double_load_acquire(const volatile AO_double_t *addr)
+ {
+ AO_double_t result;
+
+ result.AO_whole = __atomic_load_n(&addr->AO_whole, __ATOMIC_ACQUIRE); /* acquire-ordered */
+ return result;
+ }
+# define AO_HAVE_double_load_acquire
+# endif
+
+# ifndef AO_HAVE_double_store
+ AO_INLINE void
+ AO_double_store(volatile AO_double_t *addr, AO_double_t value)
+ {
+ __atomic_store_n(&addr->AO_whole, value.AO_whole, __ATOMIC_RELAXED); /* single double-wide store */
+ }
+# define AO_HAVE_double_store
+# endif
+
+# ifndef AO_HAVE_double_store_release
+ AO_INLINE void
+ AO_double_store_release(volatile AO_double_t *addr, AO_double_t value)
+ {
+ __atomic_store_n(&addr->AO_whole, value.AO_whole, __ATOMIC_RELEASE); /* release-ordered */
+ }
+# define AO_HAVE_double_store_release
+# endif
+
+# ifndef AO_HAVE_double_compare_and_swap
+ AO_INLINE int
+ AO_double_compare_and_swap(volatile AO_double_t *addr,
+ AO_double_t old_val, AO_double_t new_val)
+ {
+ return (int)__atomic_compare_exchange_n(&addr->AO_whole,
+ &old_val.AO_whole /* p_expected */,
+ new_val.AO_whole /* desired */,
+ 0 /* is_weak: false */,
+ __ATOMIC_RELAXED /* success */,
+ __ATOMIC_RELAXED /* failure */); /* nonzero iff the exchange happened */
+ }
+# define AO_HAVE_double_compare_and_swap
+# endif
+
+ /* TODO: Add double CAS _acquire/release/full primitives. */
+#endif /* AO_HAVE_DOUBLE_PTR_STORAGE */
--- /dev/null
+/*
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+
+#include "../all_aligned_atomic_load_store.h"
+
+#include "../test_and_set_t_is_ao_t.h"
+
+/* There's also "isync" and "barrier"; however, for all current CPU */
+/* versions, "syncht" should suffice. Likewise, it seems that the */
+/* auto-defined versions of *_acquire, *_release or *_full suffice for */
+/* all current ISA implementations. */
+/* Full memory barrier, implemented with the Hexagon "syncht" */
+/* instruction (see the note above on why it is preferred over */
+/* "isync"/"barrier" on current CPU versions). */
+AO_INLINE void
+AO_nop_full(void)
+{
+ __asm__ __volatile__("syncht" : : : "memory");
+}
+#define AO_HAVE_nop_full
+
+/* The Hexagon has load-locked, store-conditional primitives, and so */
+/* resulting code is very nearly identical to that of PowerPC. */
+
+#ifndef AO_PREFER_GENERALIZED
+/* Atomically add incr to *addr using a memw_locked (LL/SC) retry */
+/* loop; returns the value of *addr prior to the addition. No */
+/* ordering semantics are implied (plain variant). */
+AO_INLINE AO_t
+AO_fetch_and_add(volatile AO_t *addr, AO_t incr)
+{
+ AO_t oldval;
+ AO_t newval;
+ __asm__ __volatile__(
+ "1:\n"
+ " %0 = memw_locked(%3);\n" /* load and reserve */
+ " %1 = add (%0,%4);\n" /* increment */
+ " memw_locked(%3,p1) = %1;\n" /* store conditional */
+ " if (!p1) jump 1b;\n" /* retry if lost reservation */
+ : "=&r"(oldval), "=&r"(newval), "+m"(*addr)
+ : "r"(addr), "r"(incr)
+ : "memory", "p1");
+ return oldval;
+}
+#define AO_HAVE_fetch_and_add
+
+/* Atomically set *addr to 1 (the locked value) if it was zero; */
+/* returns the previous value, which is zero iff this call performed */
+/* the store (the asm skips the store when *addr is already nonzero). */
+AO_INLINE AO_TS_VAL_t
+AO_test_and_set(volatile AO_TS_t *addr)
+{
+ int oldval;
+ int locked_value = 1;
+
+ __asm__ __volatile__(
+ "1:\n"
+ " %0 = memw_locked(%2);\n" /* load and reserve */
+ " {\n"
+ " p2 = cmp.eq(%0,#0);\n" /* if load is not zero, */
+ " if (!p2.new) jump:nt 2f; \n" /* we are done */
+ " }\n"
+ " memw_locked(%2,p1) = %3;\n" /* else store conditional */
+ " if (!p1) jump 1b;\n" /* retry if lost reservation */
+ "2:\n" /* oldval is zero if we set */
+ : "=&r"(oldval), "+m"(*addr)
+ : "r"(addr), "r"(locked_value)
+ : "memory", "p1", "p2")
+ return (AO_TS_VAL_t)oldval;
+}
+#define AO_HAVE_test_and_set
+#endif /* !AO_PREFER_GENERALIZED */
+
+#ifndef AO_GENERALIZE_ASM_BOOL_CAS
+ /* Boolean compare-and-swap: store new_val into *addr iff it equals */
+ /* old; returns 1 on success, 0 otherwise (no ordering implied). */
+ AO_INLINE int
+ AO_compare_and_swap(volatile AO_t *addr, AO_t old, AO_t new_val)
+ {
+ AO_t __oldval;
+ int result = 0;
+ __asm__ __volatile__(
+ "1:\n"
+ " %0 = memw_locked(%3);\n" /* load and reserve */
+ " {\n"
+ " p2 = cmp.eq(%0,%4);\n" /* if load is not equal to */
+ " if (!p2.new) jump:nt 2f; \n" /* old, fail */
+ " }\n"
+ " memw_locked(%3,p1) = %5;\n" /* else store conditional */
+ " if (!p1) jump 1b;\n" /* retry if lost reservation */
+ " %1 = #1\n" /* success, result = 1 */
+ "2:\n"
+ : "=&r" (__oldval), "+r" (result), "+m"(*addr)
+ : "r" (addr), "r" (old), "r" (new_val)
+ : "p1", "p2", "memory"
+ );
+ return result;
+ }
+# define AO_HAVE_compare_and_swap
+#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
+
+/* Compare-and-swap returning the value fetched from *addr; the */
+/* exchange took place iff the returned value equals old_val. */
+AO_INLINE AO_t
+AO_fetch_compare_and_swap(volatile AO_t *addr, AO_t old_val, AO_t new_val)
+{
+ AO_t __oldval;
+
+ __asm__ __volatile__(
+ "1:\n"
+ " %0 = memw_locked(%2);\n" /* load and reserve */
+ " {\n"
+ " p2 = cmp.eq(%0,%3);\n" /* if load is not equal to */
+ " if (!p2.new) jump:nt 2f; \n" /* old_val, fail */
+ " }\n"
+ " memw_locked(%2,p1) = %4;\n" /* else store conditional */
+ " if (!p1) jump 1b;\n" /* retry if lost reservation */
+ "2:\n"
+ : "=&r" (__oldval), "+m"(*addr)
+ : "r" (addr), "r" (old_val), "r" (new_val)
+ : "p1", "p2", "memory"
+ );
+ return __oldval;
+}
+#define AO_HAVE_fetch_compare_and_swap
+
+#define AO_T_IS_INT
--- /dev/null
+/*
+ * Copyright (c) 2003 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "../all_atomic_load_store.h"
+
+/* Some architecture set descriptions include special "ordered" memory */
+/* operations. As far as we can tell, no existing processors actually */
+/* require those. Nor does it appear likely that future processors */
+/* will. */
+#include "../ordered.h"
+
+/* GCC will not guarantee the alignment we need, use four lock words */
+/* and select the correctly aligned datum. See the glibc 2.3.2 */
+/* linuxthread port for the original implementation. */
+struct AO_pa_clearable_loc {
+ int data[4];
+};
+
+#undef AO_TS_INITIALIZER
+#define AO_TS_t struct AO_pa_clearable_loc
+#define AO_TS_INITIALIZER {1,1,1,1}
+/* Switch meaning of set and clear, since we only have an atomic clear */
+/* instruction. */
+typedef enum {AO_PA_TS_set = 0, AO_PA_TS_clear = 1} AO_PA_TS_val;
+#define AO_TS_VAL_t AO_PA_TS_val
+#define AO_TS_CLEAR AO_PA_TS_clear
+#define AO_TS_SET AO_PA_TS_set
+
+/* The hppa only has one atomic read and modify memory operation, */
+/* load and clear, so hppa spinlocks must use zero to signify that */
+/* someone is holding the lock. The address used for the ldcw */
+/* semaphore must be 16-byte aligned. */
+#define AO_ldcw(a, ret) \
+ __asm__ __volatile__("ldcw 0(%2), %0" \
+ : "=r" (ret), "=m" (*(a)) : "r" (a))
+
+/* Because malloc only guarantees 8-byte alignment for malloc'd data, */
+/* and GCC only guarantees 8-byte alignment for stack locals, we can't */
+/* be assured of 16-byte alignment for atomic lock data even if we */
+/* specify "__attribute ((aligned(16)))" in the type declaration. So, */
+/* we use a struct containing an array of four ints for the atomic lock */
+/* type and dynamically select the 16-byte aligned int from the array */
+/* for the semaphore. */
+#define AO_PA_LDCW_ALIGNMENT 16
+#define AO_ldcw_align(addr) \
+ ((volatile unsigned *)(((unsigned long)(addr) \
+ + (AO_PA_LDCW_ALIGNMENT - 1)) \
+ & ~(AO_PA_LDCW_ALIGNMENT - 1)))
+
+/* Works on PA 1.1 and PA 2.0 systems */
+/* Acquire the lock word with "load and clear word" (ldcw) on the */
+/* 16-byte-aligned element of the 4-int lock structure. Note the */
+/* inverted convention (see above): the returned value is AO_TS_SET */
+/* (i.e. 0) iff this call obtained the lock. */
+AO_INLINE AO_TS_VAL_t
+AO_test_and_set_full(volatile AO_TS_t * addr)
+{
+ volatile unsigned int ret;
+ volatile unsigned *a = AO_ldcw_align(addr);
+
+ AO_ldcw(a, ret);
+ return (AO_TS_VAL_t)ret;
+}
+#define AO_HAVE_test_and_set_full
+
+/* Release the lock by storing 1 (AO_TS_CLEAR under the inverted */
+/* convention) back into the aligned lock word; the compiler barrier */
+/* keeps prior accesses from being moved past the store. */
+AO_INLINE void
+AO_pa_clear(volatile AO_TS_t * addr)
+{
+ volatile unsigned *a = AO_ldcw_align(addr);
+
+ AO_compiler_barrier();
+ *a = 1;
+}
+#define AO_CLEAR(addr) AO_pa_clear(addr)
--- /dev/null
+/*
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "../all_atomic_load_store.h"
+
+#include "../all_acquire_release_volatile.h"
+
+#include "../test_and_set_t_is_char.h"
+
+#ifdef _ILP32
+ /* 32-bit HP/UX code. */
+ /* This requires pointer "swizzling". Pointers need to be expanded */
+ /* to 64 bits using the addp4 instruction before use. This makes it */
+ /* hard to share code, but we try anyway. */
+# define AO_LEN "4"
+ /* We assume that addr always appears in argument position 1 in asm */
+ /* code. If it is clobbered due to swizzling, we also need it in */
+ /* second position. Any later arguments are referenced symbolically, */
+ /* so that we don't have to worry about their position. This requires*/
+ /* gcc 3.1, but you shouldn't be using anything older than that on */
+ /* IA64 anyway. */
+ /* The AO_MASK macro is a workaround for the fact that HP/UX gcc */
+ /* appears to otherwise store 64-bit pointers in ar.ccv, i.e. it */
+ /* doesn't appear to clear high bits in a pointer value we pass into */
+ /* assembly code, even if it is supposedly of type AO_t. */
+# define AO_IN_ADDR "1"(addr)
+# define AO_OUT_ADDR , "=r"(addr)
+# define AO_SWIZZLE "addp4 %1=0,%1;;\n"
+# define AO_MASK(ptr) __asm__ __volatile__("zxt4 %1=%1": "=r"(ptr) : "0"(ptr))
+#else
+# define AO_LEN "8"
+# define AO_IN_ADDR "r"(addr)
+# define AO_OUT_ADDR
+# define AO_SWIZZLE
+# define AO_MASK(ptr) /* empty */
+#endif /* !_ILP32 */
+
+/* Full memory barrier via the IA-64 "mf" (memory fence) instruction. */
+AO_INLINE void
+AO_nop_full(void)
+{
+ __asm__ __volatile__("mf" : : : "memory");
+}
+#define AO_HAVE_nop_full
+
+#ifndef AO_PREFER_GENERALIZED
+/* The four fetchadd variants below use the IA-64 "fetchadd" */
+/* instruction (increment restricted to small immediates, here +1 */
+/* or -1) with explicit .acq/.rel completer; AO_SWIZZLE/AO_LEN adapt */
+/* the code for the 32-bit HP/UX (_ILP32) case, see above. Each */
+/* returns the pre-update value of *addr. */
+AO_INLINE AO_t
+AO_fetch_and_add1_acquire (volatile AO_t *addr)
+{
+ AO_t result;
+
+ __asm__ __volatile__ (AO_SWIZZLE
+ "fetchadd" AO_LEN ".acq %0=[%1],1":
+ "=r" (result) AO_OUT_ADDR: AO_IN_ADDR :"memory");
+ return result;
+}
+#define AO_HAVE_fetch_and_add1_acquire
+
+AO_INLINE AO_t
+AO_fetch_and_add1_release (volatile AO_t *addr)
+{
+ AO_t result;
+
+ __asm__ __volatile__ (AO_SWIZZLE
+ "fetchadd" AO_LEN ".rel %0=[%1],1":
+ "=r" (result) AO_OUT_ADDR: AO_IN_ADDR :"memory");
+ return result;
+}
+#define AO_HAVE_fetch_and_add1_release
+
+AO_INLINE AO_t
+AO_fetch_and_sub1_acquire (volatile AO_t *addr)
+{
+ AO_t result;
+
+ __asm__ __volatile__ (AO_SWIZZLE
+ "fetchadd" AO_LEN ".acq %0=[%1],-1":
+ "=r" (result) AO_OUT_ADDR: AO_IN_ADDR :"memory");
+ return result;
+}
+#define AO_HAVE_fetch_and_sub1_acquire
+
+AO_INLINE AO_t
+AO_fetch_and_sub1_release (volatile AO_t *addr)
+{
+ AO_t result;
+
+ __asm__ __volatile__ (AO_SWIZZLE
+ "fetchadd" AO_LEN ".rel %0=[%1],-1":
+ "=r" (result) AO_OUT_ADDR: AO_IN_ADDR :"memory");
+ return result;
+}
+#define AO_HAVE_fetch_and_sub1_release
+#endif /* !AO_PREFER_GENERALIZED */
+
+/* Fetch-compare-and-swap variants: the expected value is placed in */
+/* the ar.ccv application register, then "cmpxchg" performs the */
+/* conditional exchange with acquire (.acq) or release (.rel) */
+/* semantics. The fetched (pre-exchange) value is returned; the */
+/* exchange happened iff it equals old. AO_MASK clears high bits of */
+/* old in the _ILP32 case (see above). */
+AO_INLINE AO_t
+AO_fetch_compare_and_swap_acquire(volatile AO_t *addr, AO_t old, AO_t new_val)
+{
+ AO_t fetched_val;
+ AO_MASK(old);
+ __asm__ __volatile__(AO_SWIZZLE
+ "mov ar.ccv=%[old] ;; cmpxchg" AO_LEN
+ ".acq %0=[%1],%[new_val],ar.ccv"
+ : "=r"(fetched_val) AO_OUT_ADDR
+ : AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"(old)
+ : "memory");
+ return fetched_val;
+}
+#define AO_HAVE_fetch_compare_and_swap_acquire
+
+AO_INLINE AO_t
+AO_fetch_compare_and_swap_release(volatile AO_t *addr, AO_t old, AO_t new_val)
+{
+ AO_t fetched_val;
+ AO_MASK(old);
+ __asm__ __volatile__(AO_SWIZZLE
+ "mov ar.ccv=%[old] ;; cmpxchg" AO_LEN
+ ".rel %0=[%1],%[new_val],ar.ccv"
+ : "=r"(fetched_val) AO_OUT_ADDR
+ : AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"(old)
+ : "memory");
+ return fetched_val;
+}
+#define AO_HAVE_fetch_compare_and_swap_release
+
+/* Byte and halfword variants use cmpxchg1/cmpxchg2; the expected */
+/* value is widened to AO_t before being placed in ar.ccv. */
+AO_INLINE unsigned char
+AO_char_fetch_compare_and_swap_acquire(volatile unsigned char *addr,
+ unsigned char old, unsigned char new_val)
+{
+ unsigned char fetched_val;
+ __asm__ __volatile__(AO_SWIZZLE
+ "mov ar.ccv=%[old] ;; cmpxchg1.acq %0=[%1],%[new_val],ar.ccv"
+ : "=r"(fetched_val) AO_OUT_ADDR
+ : AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"((AO_t)old)
+ : "memory");
+ return fetched_val;
+}
+#define AO_HAVE_char_fetch_compare_and_swap_acquire
+
+AO_INLINE unsigned char
+AO_char_fetch_compare_and_swap_release(volatile unsigned char *addr,
+ unsigned char old, unsigned char new_val)
+{
+ unsigned char fetched_val;
+ __asm__ __volatile__(AO_SWIZZLE
+ "mov ar.ccv=%[old] ;; cmpxchg1.rel %0=[%1],%[new_val],ar.ccv"
+ : "=r"(fetched_val) AO_OUT_ADDR
+ : AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"((AO_t)old)
+ : "memory");
+ return fetched_val;
+}
+#define AO_HAVE_char_fetch_compare_and_swap_release
+
+AO_INLINE unsigned short
+AO_short_fetch_compare_and_swap_acquire(volatile unsigned short *addr,
+ unsigned short old, unsigned short new_val)
+{
+ unsigned short fetched_val;
+ __asm__ __volatile__(AO_SWIZZLE
+ "mov ar.ccv=%[old] ;; cmpxchg2.acq %0=[%1],%[new_val],ar.ccv"
+ : "=r"(fetched_val) AO_OUT_ADDR
+ : AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"((AO_t)old)
+ : "memory");
+ return fetched_val;
+}
+#define AO_HAVE_short_fetch_compare_and_swap_acquire
+
+AO_INLINE unsigned short
+AO_short_fetch_compare_and_swap_release(volatile unsigned short *addr,
+ unsigned short old, unsigned short new_val)
+{
+ unsigned short fetched_val;
+ __asm__ __volatile__(AO_SWIZZLE
+ "mov ar.ccv=%[old] ;; cmpxchg2.rel %0=[%1],%[new_val],ar.ccv"
+ : "=r"(fetched_val) AO_OUT_ADDR
+ : AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"((AO_t)old)
+ : "memory");
+ return fetched_val;
+}
+#define AO_HAVE_short_fetch_compare_and_swap_release
+
+#ifdef _ILP32
+
+# define AO_T_IS_INT
+
+ /* TODO: Add compare_double_and_swap_double for the _ILP32 case. */
+#else
+
+# ifndef AO_PREFER_GENERALIZED
+ /* 32-bit (unsigned int) primitives for the LP64 case: same pattern */
+ /* as the AO_t versions above, but with fixed fetchadd4/cmpxchg4 */
+ /* and no pointer swizzling. Each returns the pre-update value. */
+ AO_INLINE unsigned int
+ AO_int_fetch_and_add1_acquire(volatile unsigned int *addr)
+ {
+ unsigned int result;
+ __asm__ __volatile__("fetchadd4.acq %0=[%1],1"
+ : "=r" (result) : AO_IN_ADDR
+ : "memory");
+ return result;
+ }
+# define AO_HAVE_int_fetch_and_add1_acquire
+
+ AO_INLINE unsigned int
+ AO_int_fetch_and_add1_release(volatile unsigned int *addr)
+ {
+ unsigned int result;
+ __asm__ __volatile__("fetchadd4.rel %0=[%1],1"
+ : "=r" (result) : AO_IN_ADDR
+ : "memory");
+ return result;
+ }
+# define AO_HAVE_int_fetch_and_add1_release
+
+ AO_INLINE unsigned int
+ AO_int_fetch_and_sub1_acquire(volatile unsigned int *addr)
+ {
+ unsigned int result;
+ __asm__ __volatile__("fetchadd4.acq %0=[%1],-1"
+ : "=r" (result) : AO_IN_ADDR
+ : "memory");
+ return result;
+ }
+# define AO_HAVE_int_fetch_and_sub1_acquire
+
+ AO_INLINE unsigned int
+ AO_int_fetch_and_sub1_release(volatile unsigned int *addr)
+ {
+ unsigned int result;
+ __asm__ __volatile__("fetchadd4.rel %0=[%1],-1"
+ : "=r" (result) : AO_IN_ADDR
+ : "memory");
+ return result;
+ }
+# define AO_HAVE_int_fetch_and_sub1_release
+# endif /* !AO_PREFER_GENERALIZED */
+
+ /* 32-bit fetch-CAS: returns the fetched value; the exchange took */
+ /* place iff it equals old (widened to AO_t for ar.ccv). */
+ AO_INLINE unsigned int
+ AO_int_fetch_compare_and_swap_acquire(volatile unsigned int *addr,
+ unsigned int old, unsigned int new_val)
+ {
+ unsigned int fetched_val;
+ __asm__ __volatile__("mov ar.ccv=%3 ;; cmpxchg4.acq %0=[%1],%2,ar.ccv"
+ : "=r"(fetched_val)
+ : AO_IN_ADDR, "r"(new_val), "r"((AO_t)old)
+ : "memory");
+ return fetched_val;
+ }
+# define AO_HAVE_int_fetch_compare_and_swap_acquire
+
+ AO_INLINE unsigned int
+ AO_int_fetch_compare_and_swap_release(volatile unsigned int *addr,
+ unsigned int old, unsigned int new_val)
+ {
+ unsigned int fetched_val;
+ __asm__ __volatile__("mov ar.ccv=%3 ;; cmpxchg4.rel %0=[%1],%2,ar.ccv"
+ : "=r"(fetched_val)
+ : AO_IN_ADDR, "r"(new_val), "r"((AO_t)old)
+ : "memory");
+ return fetched_val;
+ }
+# define AO_HAVE_int_fetch_compare_and_swap_release
+#endif /* !_ILP32 */
+
+/* TODO: Add compare_and_swap_double as soon as there is widely */
+/* available hardware that implements it. */
--- /dev/null
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+/* The cas instruction causes an emulation trap for the */
+/* 060 with a misaligned pointer, so let's avoid this. */
+#undef AO_t
+typedef unsigned long AO_t __attribute__ ((aligned (4)));
+
+/* FIXME. Very incomplete. */
+#include "../all_aligned_atomic_load_store.h"
+
+/* Are there any m68k multiprocessors still around? */
+/* AFAIK, Alliants were sequentially consistent. */
+#include "../ordered.h"
+
+#include "../test_and_set_t_is_char.h"
+
+/* Test-and-set via the m68k "tas" instruction; "sne" materializes */
+/* the result from the condition codes. Returns AO_TS_SET iff the */
+/* location was already set. */
+AO_INLINE AO_TS_VAL_t
+AO_test_and_set_full(volatile AO_TS_t *addr) {
+ AO_TS_t oldval;
+
+ /* The value at addr is semi-phony. */
+ /* 'tas' sets bit 7 while the return */
+ /* value pretends all bits were set, */
+ /* which at least matches AO_TS_SET. */
+ __asm__ __volatile__(
+ "tas %1; sne %0"
+ : "=d" (oldval), "=m" (*addr)
+ : "m" (*addr)
+ : "memory");
+ /* This cast works due to the above. */
+ return (AO_TS_VAL_t)oldval;
+}
+#define AO_HAVE_test_and_set_full
+
+/* Returns nonzero if the comparison succeeded. */
+/* Uses "cas.l"; "seq" yields 1 in the low byte of result on */
+/* success, and the final negation turns that into a nonzero int. */
+AO_INLINE int
+AO_compare_and_swap_full(volatile AO_t *addr,
+ AO_t old, AO_t new_val)
+{
+ char result;
+
+ __asm__ __volatile__(
+ "cas.l %3,%4,%1; seq %0"
+ : "=d" (result), "=m" (*addr)
+ : "m" (*addr), "d" (old), "d" (new_val)
+ : "memory");
+ return -result;
+}
+#define AO_HAVE_compare_and_swap_full
+
+/* TODO: implement AO_fetch_compare_and_swap. */
+
+#define AO_T_IS_INT
--- /dev/null
+/*
+ * Copyright (c) 2005,2007 Thiemo Seufer <ths@networkno.de>
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+
+/*
+ * FIXME: This should probably make finer distinctions. SGI MIPS is
+ * much more strongly ordered, and in fact closer to sequentially
+ * consistent. This is really aimed at modern embedded implementations.
+ */
+
+#include "../all_aligned_atomic_load_store.h"
+
+#include "../test_and_set_t_is_ao_t.h"
+
+/* Data dependence does not imply read ordering. */
+#define AO_NO_DD_ORDERING
+
+#ifdef __mips64
+# define AO_MIPS_SET_ISA " .set mips3\n"
+# define AO_MIPS_LL_1(args) " lld " args "\n"
+# define AO_MIPS_SC(args) " scd " args "\n"
+#else
+# define AO_MIPS_SET_ISA " .set mips2\n"
+# define AO_MIPS_LL_1(args) " ll " args "\n"
+# define AO_MIPS_SC(args) " sc " args "\n"
+# define AO_T_IS_INT
+#endif
+
+#ifdef AO_ICE9A1_LLSC_WAR
+ /* ICE9 rev A1 chip (used in very few systems) is reported to */
+ /* have a low-frequency bug that causes LL to fail. */
+ /* To workaround, just issue the second 'LL'. */
+# define AO_MIPS_LL(args) AO_MIPS_LL_1(args) AO_MIPS_LL_1(args)
+#else
+# define AO_MIPS_LL(args) AO_MIPS_LL_1(args)
+#endif
+
+/* Full memory barrier via MIPS "sync", wrapped in ".set push/pop" */
+/* so the ISA level needed for the instruction is selected locally */
+/* regardless of the compiler's assembler options. */
+AO_INLINE void
+AO_nop_full(void)
+{
+ __asm__ __volatile__(
+ " .set push \n"
+ AO_MIPS_SET_ISA
+ " .set noreorder \n"
+ " .set nomacro \n"
+ " sync \n"
+ " .set pop "
+ : : : "memory");
+}
+#define AO_HAVE_nop_full
+
+#ifndef AO_PREFER_GENERALIZED
+/* Atomically add incr to *addr via an LL/SC retry loop (ll/sc or */
+/* lld/scd depending on __mips64, see the AO_MIPS_LL/SC macros); */
+/* returns the pre-update value. No ordering implied. */
+AO_INLINE AO_t
+AO_fetch_and_add(volatile AO_t *addr, AO_t incr)
+{
+ register int result;
+ register int temp;
+
+ __asm__ __volatile__(
+ " .set push\n"
+ AO_MIPS_SET_ISA
+ " .set noreorder\n"
+ " .set nomacro\n"
+ "1: "
+ AO_MIPS_LL("%0, %2")
+ " addu %1, %0, %3\n"
+ AO_MIPS_SC("%1, %2")
+ " beqz %1, 1b\n"
+ " nop\n"
+ " .set pop "
+ : "=&r" (result), "=&r" (temp), "+m" (*addr)
+ : "Ir" (incr)
+ : "memory");
+ return (AO_t)result;
+}
+#define AO_HAVE_fetch_and_add
+
+/* Unconditionally store 1 into *addr with LL/SC and return the */
+/* previous value (zero iff the location was clear). No ordering */
+/* implied; the _full etc. variants are generalized from this. */
+AO_INLINE AO_TS_VAL_t
+AO_test_and_set(volatile AO_TS_t *addr)
+{
+ register int oldval;
+ register int temp;
+
+ __asm__ __volatile__(
+ " .set push\n"
+ AO_MIPS_SET_ISA
+ " .set noreorder\n"
+ " .set nomacro\n"
+ "1: "
+ AO_MIPS_LL("%0, %2")
+ " move %1, %3\n"
+ AO_MIPS_SC("%1, %2")
+ " beqz %1, 1b\n"
+ " nop\n"
+ " .set pop "
+ : "=&r" (oldval), "=&r" (temp), "+m" (*addr)
+ : "r" (1)
+ : "memory");
+ return (AO_TS_VAL_t)oldval;
+}
+#define AO_HAVE_test_and_set
+
+ /* TODO: Implement AO_and/or/xor primitives directly. */
+#endif /* !AO_PREFER_GENERALIZED */
+
+#ifndef AO_GENERALIZE_ASM_BOOL_CAS
+ /* Boolean compare-and-swap: store new_val into *addr iff it equals */
+ /* old; returns nonzero on success. The SC result is retried on */
+ /* spurious failure; was_equal stays 0 when the compare fails. */
+ AO_INLINE int
+ AO_compare_and_swap(volatile AO_t *addr, AO_t old, AO_t new_val)
+ {
+ register int was_equal = 0;
+ register int temp;
+
+ __asm__ __volatile__(
+ " .set push \n"
+ AO_MIPS_SET_ISA
+ " .set noreorder \n"
+ " .set nomacro \n"
+ "1: "
+ AO_MIPS_LL("%0, %1")
+ " bne %0, %4, 2f \n"
+ " move %0, %3 \n"
+ AO_MIPS_SC("%0, %1")
+ " .set pop \n"
+ " beqz %0, 1b \n"
+ " li %2, 1 \n"
+ "2: "
+ : "=&r" (temp), "+m" (*addr), "+r" (was_equal)
+ : "r" (new_val), "r" (old)
+ : "memory");
+ return was_equal;
+ }
+# define AO_HAVE_compare_and_swap
+#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
+
+/* Compare-and-swap returning the fetched value of *addr; the */
+/* exchange took place iff the result equals old. The compare */
+/* failure path branches straight to label 2 without storing. */
+AO_INLINE AO_t
+AO_fetch_compare_and_swap(volatile AO_t *addr, AO_t old, AO_t new_val)
+{
+ register int fetched_val;
+ register int temp;
+
+ __asm__ __volatile__(
+ " .set push\n"
+ AO_MIPS_SET_ISA
+ " .set noreorder\n"
+ " .set nomacro\n"
+ "1: "
+ AO_MIPS_LL("%0, %2")
+ " bne %0, %4, 2f\n"
+ " move %1, %3\n"
+ AO_MIPS_SC("%1, %2")
+ " beqz %1, 1b\n"
+ " nop\n"
+ " .set pop\n"
+ "2:"
+ : "=&r" (fetched_val), "=&r" (temp), "+m" (*addr)
+ : "r" (new_val), "Jr" (old)
+ : "memory");
+ return (AO_t)fetched_val;
+}
+#define AO_HAVE_fetch_compare_and_swap
+
+/* #include "../standard_ao_double_t.h" */
+/* TODO: Implement double-wide operations if available. */
+
+/* CAS primitives with acquire, release and full semantics are */
+/* generated automatically (and AO_int_... primitives are */
+/* defined properly after the first generalization pass). */
--- /dev/null
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+/* Memory model documented at http://www-106.ibm.com/developerworks/ */
+/* eserver/articles/archguide.html and (clearer) */
+/* http://www-106.ibm.com/developerworks/eserver/articles/powerpc.html. */
+/* There appears to be no implicit ordering between any kind of */
+/* independent memory references. */
+/* Architecture enforces some ordering based on control dependence. */
+/* I don't know if that could help. */
+/* Data-dependent loads are always ordered. */
+/* Based on the above references, eieio is intended for use on */
+/* uncached memory, which we don't support. It does not order loads */
+/* from cached memory. */
+
+#include "../all_aligned_atomic_load_store.h"
+
+#include "../test_and_set_t_is_ao_t.h"
+ /* There seems to be no byte equivalent of lwarx, so this */
+ /* may really be what we want, at least in the 32-bit case. */
+
+/* Full barrier: "sync" orders all prior memory accesses against all */
+/* subsequent ones (including StoreLoad). */
+AO_INLINE void
+AO_nop_full(void)
+{
+ __asm__ __volatile__("sync" : : : "memory");
+}
+#define AO_HAVE_nop_full
+
+/* lwsync apparently works for everything but a StoreLoad barrier. */
+/* Falls back to the full "sync" on targets that define */
+/* __NO_LWSYNC__. */
+AO_INLINE void
+AO_lwsync(void)
+{
+#ifdef __NO_LWSYNC__
+ __asm__ __volatile__("sync" : : : "memory");
+#else
+ __asm__ __volatile__("lwsync" : : : "memory");
+#endif
+}
+
+/* lwsync suffices for both write-write and read-read ordering. */
+#define AO_nop_write() AO_lwsync()
+#define AO_HAVE_nop_write
+
+#define AO_nop_read() AO_lwsync()
+#define AO_HAVE_nop_read
+
+/* We explicitly specify load_acquire, since it is important, and can */
+/* be implemented relatively cheaply. It could be implemented */
+/* with an ordinary load followed by a lwsync. But the general wisdom */
+/* seems to be that a data dependent branch followed by an isync is */
+/* cheaper. And the documentation is fairly explicit that this also */
+/* has acquire semantics. */
+/* ppc64 uses ld not lwz */
+/* Acquire load: plain load (ld on 64-bit, lwz on 32-bit) followed */
+/* by a data-dependent compare/never-taken branch into an isync, */
+/* which per the rationale above is cheaper than load + lwsync. */
+AO_INLINE AO_t
+AO_load_acquire(const volatile AO_t *addr)
+{
+ AO_t result;
+#if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
+ __asm__ __volatile__ (
+ "ld%U1%X1 %0,%1\n"
+ "cmpw %0,%0\n"
+ "bne- 1f\n"
+ "1: isync\n"
+ : "=r" (result)
+ : "m"(*addr) : "memory", "cr0");
+#else
+ /* FIXME: We should get gcc to allocate one of the condition */
+ /* registers. I always got "impossible constraint" when I */
+ /* tried the "y" constraint. */
+ __asm__ __volatile__ (
+ "lwz%U1%X1 %0,%1\n"
+ "cmpw %0,%0\n"
+ "bne- 1f\n"
+ "1: isync\n"
+ : "=r" (result)
+ : "m"(*addr) : "memory", "cc");
+#endif
+ return result;
+}
+#define AO_HAVE_load_acquire
+
+/* We explicitly specify store_release, since it relies */
+/* on the fact that lwsync is also a LoadStore barrier. */
+AO_INLINE void
+AO_store_release(volatile AO_t *addr, AO_t value)
+{
+ AO_lwsync();
+ *addr = value;
+}
+#define AO_HAVE_store_release
+
+#ifndef AO_PREFER_GENERALIZED
+/* This is similar to the code in the garbage collector. Deleting */
+/* this and having it synthesized from compare_and_swap would probably */
+/* only cost us a load immediate instruction. */
+/* Set *addr to 1 (locked) via an ldarx/stdcx. (64-bit) or */
+/* lwarx/stwcx. (32-bit) reservation loop; returns the previous */
+/* value, i.e. zero iff this call performed the store. No ordering */
+/* implied; the ordered variants below wrap it with AO_lwsync. */
+AO_INLINE AO_TS_VAL_t
+AO_test_and_set(volatile AO_TS_t *addr) {
+#if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
+/* Completely untested. And we should be using smaller objects anyway. */
+ unsigned long oldval;
+ unsigned long temp = 1; /* locked value */
+
+ __asm__ __volatile__(
+ "1:ldarx %0,0,%1\n" /* load and reserve */
+ "cmpdi %0, 0\n" /* if load is */
+ "bne 2f\n" /* non-zero, return already set */
+ "stdcx. %2,0,%1\n" /* else store conditional */
+ "bne- 1b\n" /* retry if lost reservation */
+ "2:\n" /* oldval is zero if we set */
+ : "=&r"(oldval)
+ : "r"(addr), "r"(temp)
+ : "memory", "cr0");
+#else
+ int oldval;
+ int temp = 1; /* locked value */
+
+ __asm__ __volatile__(
+ "1:lwarx %0,0,%1\n" /* load and reserve */
+ "cmpwi %0, 0\n" /* if load is */
+ "bne 2f\n" /* non-zero, return already set */
+ "stwcx. %2,0,%1\n" /* else store conditional */
+ "bne- 1b\n" /* retry if lost reservation */
+ "2:\n" /* oldval is zero if we set */
+ : "=&r"(oldval)
+ : "r"(addr), "r"(temp)
+ : "memory", "cr0");
+#endif
+ return (AO_TS_VAL_t)oldval;
+}
+#define AO_HAVE_test_and_set
+
+/* Acquire variant: barrier after the operation. */
+AO_INLINE AO_TS_VAL_t
+AO_test_and_set_acquire(volatile AO_TS_t *addr) {
+ AO_TS_VAL_t result = AO_test_and_set(addr);
+ AO_lwsync();
+ return result;
+}
+#define AO_HAVE_test_and_set_acquire
+
+/* Release variant: barrier before the operation. */
+AO_INLINE AO_TS_VAL_t
+AO_test_and_set_release(volatile AO_TS_t *addr) {
+ AO_lwsync();
+ return AO_test_and_set(addr);
+}
+#define AO_HAVE_test_and_set_release
+
+/* Full variant: barriers on both sides of the operation. */
+AO_INLINE AO_TS_VAL_t
+AO_test_and_set_full(volatile AO_TS_t *addr) {
+ AO_TS_VAL_t result;
+ AO_lwsync();
+ result = AO_test_and_set(addr);
+ AO_lwsync();
+ return result;
+}
+#define AO_HAVE_test_and_set_full
+#endif /* !AO_PREFER_GENERALIZED */
+
+#ifndef AO_GENERALIZE_ASM_BOOL_CAS
+
+ /* Boolean compare-and-swap: store new_val into *addr iff it equals */
+ /* old; returns 1 on success, 0 otherwise. Uses an ldarx/stdcx. */
+ /* (or lwarx/stwcx.) reservation loop; a failed compare branches */
+ /* past the "li %1,1" so result keeps its initial 0. No ordering */
+ /* implied; ordered variants wrap it with AO_lwsync below. */
+ AO_INLINE int
+ AO_compare_and_swap(volatile AO_t *addr, AO_t old, AO_t new_val)
+ {
+ AO_t oldval;
+ int result = 0;
+# if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
+ __asm__ __volatile__(
+ "1:ldarx %0,0,%2\n" /* load and reserve */
+ "cmpd %0, %4\n" /* if load is not equal to */
+ "bne 2f\n" /* old, fail */
+ "stdcx. %3,0,%2\n" /* else store conditional */
+ "bne- 1b\n" /* retry if lost reservation */
+ "li %1,1\n" /* result = 1; */
+ "2:\n"
+ : "=&r"(oldval), "=&r"(result)
+ : "r"(addr), "r"(new_val), "r"(old), "1"(result)
+ : "memory", "cr0");
+# else
+ __asm__ __volatile__(
+ "1:lwarx %0,0,%2\n" /* load and reserve */
+ "cmpw %0, %4\n" /* if load is not equal to */
+ "bne 2f\n" /* old, fail */
+ "stwcx. %3,0,%2\n" /* else store conditional */
+ "bne- 1b\n" /* retry if lost reservation */
+ "li %1,1\n" /* result = 1; */
+ "2:\n"
+ : "=&r"(oldval), "=&r"(result)
+ : "r"(addr), "r"(new_val), "r"(old), "1"(result)
+ : "memory", "cr0");
+# endif
+ return result;
+ }
+# define AO_HAVE_compare_and_swap
+
+ /* Acquire variant: barrier after the operation. */
+ AO_INLINE int
+ AO_compare_and_swap_acquire(volatile AO_t *addr, AO_t old, AO_t new_val)
+ {
+ int result = AO_compare_and_swap(addr, old, new_val);
+ AO_lwsync();
+ return result;
+ }
+# define AO_HAVE_compare_and_swap_acquire
+
+ /* Release variant: barrier before the operation. */
+ AO_INLINE int
+ AO_compare_and_swap_release(volatile AO_t *addr, AO_t old, AO_t new_val)
+ {
+ AO_lwsync();
+ return AO_compare_and_swap(addr, old, new_val);
+ }
+# define AO_HAVE_compare_and_swap_release
+
+ /* Full variant: barriers on both sides. */
+ AO_INLINE int
+ AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val)
+ {
+ int result;
+ AO_lwsync();
+ result = AO_compare_and_swap(addr, old, new_val);
+ AO_lwsync();
+ return result;
+ }
+# define AO_HAVE_compare_and_swap_full
+
+#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
+
+/* Compare-and-swap returning the value fetched from *addr; the */
+/* exchange took place iff the result equals old_val. Reservation */
+/* loop as above; no ordering implied by the plain variant. */
+AO_INLINE AO_t
+AO_fetch_compare_and_swap(volatile AO_t *addr, AO_t old_val, AO_t new_val)
+{
+ AO_t fetched_val;
+# if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
+ __asm__ __volatile__(
+ "1:ldarx %0,0,%1\n" /* load and reserve */
+ "cmpd %0, %3\n" /* if load is not equal to */
+ "bne 2f\n" /* old_val, fail */
+ "stdcx. %2,0,%1\n" /* else store conditional */
+ "bne- 1b\n" /* retry if lost reservation */
+ "2:\n"
+ : "=&r"(fetched_val)
+ : "r"(addr), "r"(new_val), "r"(old_val)
+ : "memory", "cr0");
+# else
+ __asm__ __volatile__(
+ "1:lwarx %0,0,%1\n" /* load and reserve */
+ "cmpw %0, %3\n" /* if load is not equal to */
+ "bne 2f\n" /* old_val, fail */
+ "stwcx. %2,0,%1\n" /* else store conditional */
+ "bne- 1b\n" /* retry if lost reservation */
+ "2:\n"
+ : "=&r"(fetched_val)
+ : "r"(addr), "r"(new_val), "r"(old_val)
+ : "memory", "cr0");
+# endif
+ return fetched_val;
+}
+#define AO_HAVE_fetch_compare_and_swap
+
+/* Acquire variant: barrier after the operation. */
+AO_INLINE AO_t
+AO_fetch_compare_and_swap_acquire(volatile AO_t *addr, AO_t old_val,
+ AO_t new_val)
+{
+ AO_t result = AO_fetch_compare_and_swap(addr, old_val, new_val);
+ AO_lwsync();
+ return result;
+}
+#define AO_HAVE_fetch_compare_and_swap_acquire
+
+/* Release variant: barrier before the operation. */
+AO_INLINE AO_t
+AO_fetch_compare_and_swap_release(volatile AO_t *addr, AO_t old_val,
+ AO_t new_val)
+{
+ AO_lwsync();
+ return AO_fetch_compare_and_swap(addr, old_val, new_val);
+}
+#define AO_HAVE_fetch_compare_and_swap_release
+
+/* Full variant: barriers on both sides. */
+AO_INLINE AO_t
+AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old_val,
+ AO_t new_val)
+{
+ AO_t result;
+ AO_lwsync();
+ result = AO_fetch_compare_and_swap(addr, old_val, new_val);
+ AO_lwsync();
+ return result;
+}
+#define AO_HAVE_fetch_compare_and_swap_full
+
+#ifndef AO_PREFER_GENERALIZED
+/* Atomically add incr to *addr via a reservation loop; returns the */
+/* pre-update value. No ordering implied by the plain variant. */
+AO_INLINE AO_t
+AO_fetch_and_add(volatile AO_t *addr, AO_t incr) {
+ AO_t oldval;
+ AO_t newval;
+#if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
+ __asm__ __volatile__(
+ "1:ldarx %0,0,%2\n" /* load and reserve */
+ "add %1,%0,%3\n" /* increment */
+ "stdcx. %1,0,%2\n" /* store conditional */
+ "bne- 1b\n" /* retry if lost reservation */
+ : "=&r"(oldval), "=&r"(newval)
+ : "r"(addr), "r"(incr)
+ : "memory", "cr0");
+#else
+ __asm__ __volatile__(
+ "1:lwarx %0,0,%2\n" /* load and reserve */
+ "add %1,%0,%3\n" /* increment */
+ "stwcx. %1,0,%2\n" /* store conditional */
+ "bne- 1b\n" /* retry if lost reservation */
+ : "=&r"(oldval), "=&r"(newval)
+ : "r"(addr), "r"(incr)
+ : "memory", "cr0");
+#endif
+ return oldval;
+}
+#define AO_HAVE_fetch_and_add
+
+/* Acquire variant: barrier after the operation. */
+AO_INLINE AO_t
+AO_fetch_and_add_acquire(volatile AO_t *addr, AO_t incr) {
+ AO_t result = AO_fetch_and_add(addr, incr);
+ AO_lwsync();
+ return result;
+}
+#define AO_HAVE_fetch_and_add_acquire
+
+/* Release variant: barrier before the operation. */
+AO_INLINE AO_t
+AO_fetch_and_add_release(volatile AO_t *addr, AO_t incr) {
+ AO_lwsync();
+ return AO_fetch_and_add(addr, incr);
+}
+#define AO_HAVE_fetch_and_add_release
+
+/* Full variant: barriers on both sides. */
+AO_INLINE AO_t
+AO_fetch_and_add_full(volatile AO_t *addr, AO_t incr) {
+ AO_t result;
+ AO_lwsync();
+ result = AO_fetch_and_add(addr, incr);
+ AO_lwsync();
+ return result;
+}
+#define AO_HAVE_fetch_and_add_full
+#endif /* !AO_PREFER_GENERALIZED */
+
+/* On 32-bit PPC, AO_t presumably has the width of int, so tell the */
+/* generalization machinery (TODO confirm against AO_t's definition). */
+#if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
+ /* Empty */
+#else
+# define AO_T_IS_INT
+#endif
+
+/* TODO: Implement double-wide operations if available. */
--- /dev/null
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+/* FIXME: untested. */
+/* The relevant documentation appears to be at */
+/* http://publibz.boulder.ibm.com/epubs/pdf/dz9zr003.pdf */
+/* around page 5-96. Apparently: */
+/* - Memory references in general are atomic only for a single */
+/* byte. But it appears that the most common load/store */
+/* instructions also guarantee atomicity for aligned */
+/* operands of standard types. WE FOOLISHLY ASSUME that */
+/* compilers only generate those. If that turns out to be */
+/* wrong, we need inline assembly code for AO_load and */
+/* AO_store. */
+/* - A store followed by a load is unordered since the store */
+/* may be delayed. Otherwise everything is ordered. */
+/* - There is a hardware compare-and-swap (CS) instruction. */
+
+#include "../all_aligned_atomic_load_store.h"
+
+#include "../ordered_except_wr.h"
+
+#include "../test_and_set_t_is_ao_t.h"
+/* FIXME: Is there a way to do byte-sized test-and-set? */
+
+/* TODO: AO_nop_full should probably be implemented directly. */
+/* It appears that certain BCR instructions have that effect. */
+/* Presumably they're cheaper than CS? */
+
+/* Hardware compare-and-swap: CS (31/32-bit) or CSG (z/Architecture */
+/* 64-bit). IPM copies the condition code into bits 2-3 of retval; */
+/* SRL by 28 leaves the cc in the low bits, and cc 0 means the swap */
+/* was performed. Returns nonzero on success. */
+AO_INLINE int AO_compare_and_swap_full(volatile AO_t *addr,
+ AO_t old, AO_t new_val)
+{
+  int retval;
+  __asm__ __volatile__ (
+# ifndef __s390x__
+ " cs %1,%2,0(%3)\n"
+# else
+ " csg %1,%2,0(%3)\n"
+# endif
+ " ipm %0\n"
+ " srl %0,28\n"
+ : "=&d" (retval), "+d" (old)
+ : "d" (new_val), "a" (addr)
+ : "cc", "memory");
+  return retval == 0;
+}
+#define AO_HAVE_compare_and_swap_full
+
+/* TODO: implement AO_fetch_compare_and_swap. */
+
+/* TODO: Add double-wide operations for 32-bit executables. */
--- /dev/null
+/*
+ * Copyright (c) 2009 by Takashi YOSHII. All rights reserved.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+
+#include "../all_atomic_load_store.h"
+#include "../ordered.h"
+
+/* sh has tas.b(byte) only */
+#include "../test_and_set_t_is_char.h"
+
+/* Test-and-set via tas.b: atomically tests the byte and sets its MSB. */
+/* tas.b sets the T flag iff the byte was zero; movt copies T into */
+/* oldval, so nonzero oldval means the location was previously clear. */
+AO_INLINE AO_TS_VAL_t
+AO_test_and_set_full(volatile AO_TS_t *addr)
+{
+  int oldval;
+  __asm__ __volatile__(
+ "tas.b @%1; movt %0"
+ : "=r" (oldval)
+ : "r" (addr)
+ : "t", "memory");
+  return oldval? AO_TS_CLEAR : AO_TS_SET;
+}
+#define AO_HAVE_test_and_set_full
+
+/* TODO: Very incomplete. */
--- /dev/null
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+/* TODO: Very incomplete; Add support for sparc64. */
+/* Non-ancient SPARCs provide compare-and-swap (casa). */
+
+#include "../all_atomic_load_store.h"
+
+/* Real SPARC code uses TSO: */
+#include "../ordered_except_wr.h"
+
+/* Test_and_set location is just a byte. */
+#include "../test_and_set_t_is_char.h"
+
+/* ldstub atomically loads the byte at addr and stores 0xFF into it; */
+/* the previous byte value is returned directly as the TS value. */
+AO_INLINE AO_TS_VAL_t
+AO_test_and_set_full(volatile AO_TS_t *addr) {
+  AO_TS_VAL_t oldval;
+
+  __asm__ __volatile__("ldstub %1,%0"
+ : "=r"(oldval), "=m"(*addr)
+ : "m"(*addr) : "memory");
+  return oldval;
+}
+#define AO_HAVE_test_and_set_full
+
+#ifndef AO_NO_SPARC_V9
+/* Returns nonzero if the comparison succeeded. */
+/* cas/casx compares [addr] with %0 (old) and, if equal, swaps in */
+/* new_val, leaving the fetched value in %1; membar instructions on */
+/* both sides make this a full barrier. The cmp/be,a sequence then */
+/* converts "fetched == new_val" into a 0/1 result. */
+/* NOTE(review): with the ",a" (annul) bit on a conditional branch the */
+/* delay-slot insn ("mov 1,%0") executes only when the branch is TAKEN, */
+/* so the comment below looks inaccurate -- confirm against the V9 spec. */
+AO_INLINE int
+AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val) {
+  char ret;
+  __asm__ __volatile__ ("membar #StoreLoad | #LoadLoad\n\t"
+# if defined(__arch64__)
+ "casx [%2],%0,%1\n\t"
+# else
+ "cas [%2],%0,%1\n\t" /* 32-bit version */
+# endif
+ "membar #StoreLoad | #StoreStore\n\t"
+ "cmp %0,%1\n\t"
+ "be,a 0f\n\t"
+ "mov 1,%0\n\t"/* one insn after branch always executed */
+ "clr %0\n\t"
+ "0:\n\t"
+ : "=r" (ret), "+r" (new_val)
+ : "r" (addr), "0" (old)
+ : "memory", "cc");
+  return (int)ret;
+}
+#define AO_HAVE_compare_and_swap_full
+
+/* TODO: implement AO_fetch_compare_and_swap. */
+#endif /* !AO_NO_SPARC_V9 */
+
+/* TODO: Extend this for SPARC v8 and v9 (V8 also has swap, V9 has CAS, */
+/* there are barriers like membar #LoadStore, CASA (32-bit) and */
+/* CASXA (64-bit) instructions added in V9). */
--- /dev/null
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ * Some of the machine specific code was borrowed from our GC distribution.
+ */
+
+/* The following really assume we have a 486 or better. Unfortunately */
+/* gcc doesn't define a suitable feature test macro based on command */
+/* line options. */
+/* We should perhaps test dynamically. */
+
+#include "../all_aligned_atomic_load_store.h"
+
+#include "../test_and_set_t_is_char.h"
+
+#if defined(__SSE2__) && !defined(AO_USE_PENTIUM4_INSTRS)
+ /* "mfence" is a part of SSE2 set (introduced on Intel Pentium 4). */
+# define AO_USE_PENTIUM4_INSTRS
+#endif
+
+#if defined(AO_USE_PENTIUM4_INSTRS)
+ /* Full memory fence: mfence serializes all prior loads and stores; */
+ /* the "memory" clobber additionally acts as a compiler barrier. */
+ AO_INLINE void
+ AO_nop_full(void)
+ {
+ __asm__ __volatile__("mfence" : : : "memory");
+ }
+# define AO_HAVE_nop_full
+
+#else
+ /* We could use the cpuid instruction. But that seems to be slower */
+ /* than the default implementation based on test_and_set_full. Thus */
+ /* we omit that bit of misinformation here. */
+#endif /* !AO_USE_PENTIUM4_INSTRS */
+
+/* As far as we can tell, the lfence and sfence instructions are not */
+/* currently needed or useful for cached memory accesses. */
+
+/* Really only works for 486 and later */
+#ifndef AO_PREFER_GENERALIZED
+ /* lock xadd atomically exchanges incr with *p and adds them, */
+ /* leaving the pre-add value in "result" (the "0" constraint ties */
+ /* result and incr to the same register). Full barrier via lock. */
+ AO_INLINE AO_t
+ AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
+ {
+ AO_t result;
+
+ __asm__ __volatile__ ("lock; xadd %0, %1" :
+ "=r" (result), "=m" (*p) : "0" (incr), "m" (*p)
+ : "memory");
+ return result;
+ }
+# define AO_HAVE_fetch_and_add_full
+#endif /* !AO_PREFER_GENERALIZED */
+
+/* Byte-wide fetch-and-add via lock xaddb; "q" constraint restricts */
+/* the result to a byte-addressable register. Returns the old value. */
+AO_INLINE unsigned char
+AO_char_fetch_and_add_full (volatile unsigned char *p, unsigned char incr)
+{
+  unsigned char result;
+
+  __asm__ __volatile__ ("lock; xaddb %0, %1" :
+ "=q" (result), "=m" (*p) : "0" (incr), "m" (*p)
+ : "memory");
+  return result;
+}
+#define AO_HAVE_char_fetch_and_add_full
+
+/* 16-bit fetch-and-add via lock xaddw; returns the old value. */
+AO_INLINE unsigned short
+AO_short_fetch_and_add_full (volatile unsigned short *p, unsigned short incr)
+{
+  unsigned short result;
+
+  __asm__ __volatile__ ("lock; xaddw %0, %1" :
+ "=r" (result), "=m" (*p) : "0" (incr), "m" (*p)
+ : "memory");
+  return result;
+}
+#define AO_HAVE_short_fetch_and_add_full
+
+#ifndef AO_PREFER_GENERALIZED
+ /* Atomic read-modify-write bitwise operations using lock-prefixed */
+ /* and/or/xor with a memory destination; no result is returned. */
+ /* Really only works for 486 and later */
+ AO_INLINE void
+ AO_and_full (volatile AO_t *p, AO_t value)
+ {
+ __asm__ __volatile__ ("lock; and %1, %0" :
+ "=m" (*p) : "r" (value), "m" (*p)
+ : "memory");
+ }
+# define AO_HAVE_and_full
+
+ AO_INLINE void
+ AO_or_full (volatile AO_t *p, AO_t value)
+ {
+ __asm__ __volatile__ ("lock; or %1, %0" :
+ "=m" (*p) : "r" (value), "m" (*p)
+ : "memory");
+ }
+# define AO_HAVE_or_full
+
+ AO_INLINE void
+ AO_xor_full (volatile AO_t *p, AO_t value)
+ {
+ __asm__ __volatile__ ("lock; xor %1, %0" :
+ "=m" (*p) : "r" (value), "m" (*p)
+ : "memory");
+ }
+# define AO_HAVE_xor_full
+
+ /* AO_store_full could be implemented directly using "xchg" but it */
+ /* could be generalized efficiently as an ordinary store accomplished */
+ /* with AO_nop_full ("mfence" instruction). */
+#endif /* !AO_PREFER_GENERALIZED */
+
+/* Test-and-set by atomically exchanging 0xff into the byte at addr; */
+/* the previous byte value is returned. xchg with a memory operand is */
+/* implicitly locked on x86, hence no "lock" prefix. */
+AO_INLINE AO_TS_VAL_t
+AO_test_and_set_full(volatile AO_TS_t *addr)
+{
+  unsigned char oldval;
+  /* Note: the "xchg" instruction does not need a "lock" prefix */
+  __asm__ __volatile__ ("xchgb %0, %1"
+ : "=q" (oldval), "=m" (*addr)
+ : "0" ((unsigned char)0xff), "m" (*addr)
+ : "memory");
+  return (AO_TS_VAL_t)oldval;
+}
+#define AO_HAVE_test_and_set_full
+
+#ifndef AO_GENERALIZE_ASM_BOOL_CAS
+ /* Returns nonzero if the comparison succeeded. */
+ /* Either defers to the __sync builtin or issues lock cmpxchg with */
+ /* the expected value in eax/rax; setz captures ZF (1 on success). */
+ AO_INLINE int
+ AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val)
+ {
+# ifdef AO_USE_SYNC_CAS_BUILTIN
+ return (int)__sync_bool_compare_and_swap(addr, old, new_val
+ /* empty protection list */);
+ /* Note: an empty list of variables protected by the */
+ /* memory barrier should mean all globally accessible */
+ /* variables are protected. */
+# else
+ char result;
+ __asm__ __volatile__ ("lock; cmpxchg %3, %0; setz %1"
+ : "=m" (*addr), "=a" (result)
+ : "m" (*addr), "r" (new_val), "a" (old)
+ : "memory");
+ return (int)result;
+# endif
+ }
+# define AO_HAVE_compare_and_swap_full
+#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
+
+/* CAS returning the fetched value: lock cmpxchg leaves the previous */
+/* contents of *addr in eax/rax regardless of success, so the caller */
+/* can compare it with old_val to detect whether the swap happened. */
+AO_INLINE AO_t
+AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old_val,
+ AO_t new_val)
+{
+# ifdef AO_USE_SYNC_CAS_BUILTIN
+ return __sync_val_compare_and_swap(addr, old_val, new_val
+ /* empty protection list */);
+# else
+ AO_t fetched_val;
+ __asm__ __volatile__ ("lock; cmpxchg %3, %4"
+ : "=a" (fetched_val), "=m" (*addr)
+ : "a" (old_val), "r" (new_val), "m" (*addr)
+ : "memory");
+ return fetched_val;
+# endif
+}
+#define AO_HAVE_fetch_compare_and_swap_full
+
+#if !defined(__x86_64__) && !defined(AO_USE_SYNC_CAS_BUILTIN)
+# include "../standard_ao_double_t.h"
+
+ /* Reading or writing a quadword aligned on a 64-bit boundary is */
+ /* always carried out atomically on at least a Pentium according to */
+ /* Chapter 8.1.1 of Volume 3A Part 1 of Intel processor manuals. */
+# define AO_ACCESS_double_CHECK_ALIGNED
+# include "../loadstore/double_atomic_load_store.h"
+
+ /* Returns nonzero if the comparison succeeded. */
+ /* Really requires at least a Pentium. */
+ /* 64-bit-wide CAS on IA-32 via lock cmpxchg8b: edx:eax hold the */
+ /* expected pair, ecx:ebx the replacement pair, and setz captures */
+ /* the success flag. Under PIC, ebx is the GOT pointer, so it is */
+ /* saved/loaded/restored manually around the instruction. */
+ AO_INLINE int
+ AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
+ AO_t old_val1, AO_t old_val2,
+ AO_t new_val1, AO_t new_val2)
+ {
+ char result;
+# ifdef __PIC__
+ AO_t saved_ebx;
+
+ /* If PIC is turned on, we cannot use ebx as it is reserved for the */
+ /* GOT pointer. We should save and restore ebx. The proposed */
+ /* solution is not so efficient as the older alternatives using */
+ /* push ebx or edi as new_val1 (w/o clobbering edi and temporary */
+ /* local variable usage) but it is more portable (it works even if */
+ /* ebx is not used as GOT pointer, and it works for the buggy GCC */
+ /* releases that incorrectly evaluate memory operands offset in the */
+ /* inline assembly after push). */
+# ifdef __OPTIMIZE__
+ __asm__ __volatile__("mov %%ebx, %2\n\t" /* save ebx */
+ "lea %0, %%edi\n\t" /* in case addr is in ebx */
+ "mov %7, %%ebx\n\t" /* load new_val1 */
+ "lock; cmpxchg8b (%%edi)\n\t"
+ "mov %2, %%ebx\n\t" /* restore ebx */
+ "setz %1"
+ : "=m" (*addr), "=a" (result), "=m" (saved_ebx)
+ : "m" (*addr), "d" (old_val2), "a" (old_val1),
+ "c" (new_val2), "m" (new_val1)
+ : "%edi", "memory");
+# else
+ /* A less-efficient code manually preserving edi if GCC invoked */
+ /* with -O0 option (otherwise it fails while finding a register */
+ /* in class 'GENERAL_REGS'). */
+ AO_t saved_edi;
+ __asm__ __volatile__("mov %%edi, %3\n\t" /* save edi */
+ "mov %%ebx, %2\n\t" /* save ebx */
+ "lea %0, %%edi\n\t" /* in case addr is in ebx */
+ "mov %8, %%ebx\n\t" /* load new_val1 */
+ "lock; cmpxchg8b (%%edi)\n\t"
+ "mov %2, %%ebx\n\t" /* restore ebx */
+ "mov %3, %%edi\n\t" /* restore edi */
+ "setz %1"
+ : "=m" (*addr), "=a" (result),
+ "=m" (saved_ebx), "=m" (saved_edi)
+ : "m" (*addr), "d" (old_val2), "a" (old_val1),
+ "c" (new_val2), "m" (new_val1) : "memory");
+# endif
+# else
+ /* For non-PIC mode, this operation could be simplified (and be */
+ /* faster) by using ebx as new_val1 (GCC would refuse to compile */
+ /* such code for PIC mode). */
+ __asm__ __volatile__ ("lock; cmpxchg8b %0; setz %1"
+ : "=m" (*addr), "=a" (result)
+ : "m" (*addr), "d" (old_val2), "a" (old_val1),
+ "c" (new_val2), "b" (new_val1)
+ : "memory");
+# endif
+ return (int) result;
+ }
+# define AO_HAVE_compare_double_and_swap_double_full
+
+# define AO_T_IS_INT
+
+#elif defined(__ILP32__) || !defined(__x86_64__)
+# include "../standard_ao_double_t.h"
+
+ /* Reading or writing a quadword aligned on a 64-bit boundary is */
+ /* always carried out atomically (requires at least a Pentium). */
+# define AO_ACCESS_double_CHECK_ALIGNED
+# include "../loadstore/double_atomic_load_store.h"
+
+ /* X32 has native support for 64-bit integer operations (AO_double_t */
+ /* is a 64-bit integer and we could use 64-bit cmpxchg). */
+ /* This primitive is used by compare_double_and_swap_double_full. */
+ /* On x32 the AO_double_t fits in one 64-bit integer (AO_whole), so */
+ /* the whole double-word CAS reduces to a single __sync builtin. */
+ AO_INLINE int
+ AO_double_compare_and_swap_full(volatile AO_double_t *addr,
+ AO_double_t old_val, AO_double_t new_val)
+ {
+ /* It is safe to use __sync CAS built-in here. */
+ return __sync_bool_compare_and_swap(&addr->AO_whole,
+ old_val.AO_whole, new_val.AO_whole
+ /* empty protection list */);
+ }
+# define AO_HAVE_double_compare_and_swap_full
+
+# define AO_T_IS_INT
+
+#else /* 64-bit */
+
+ /* 32-bit fetch-and-add on x86_64 via lock xaddl; returns the value */
+ /* held at p before the addition. */
+ AO_INLINE unsigned int
+ AO_int_fetch_and_add_full (volatile unsigned int *p, unsigned int incr)
+ {
+ unsigned int result;
+
+ __asm__ __volatile__ ("lock; xaddl %0, %1"
+ : "=r" (result), "=m" (*p)
+ : "0" (incr), "m" (*p)
+ : "memory");
+ return result;
+ }
+# define AO_HAVE_int_fetch_and_add_full
+
+ /* The Intel and AMD Architecture Programmer Manuals state roughly */
+ /* the following: */
+ /* - CMPXCHG16B (with a LOCK prefix) can be used to perform 16-byte */
+ /* atomic accesses in 64-bit mode (with certain alignment */
+ /* restrictions); */
+ /* - SSE instructions that access data larger than a quadword (like */
+ /* MOVDQA) may be implemented using multiple memory accesses; */
+ /* - LOCK prefix causes an invalid-opcode exception when used with */
+ /* 128-bit media (SSE) instructions. */
+ /* Thus, currently, the only way to implement lock-free double_load */
+ /* and double_store on x86_64 is to use CMPXCHG16B (if available). */
+
+/* TODO: Test some gcc macro to detect presence of cmpxchg16b. */
+
+# ifdef AO_CMPXCHG16B_AVAILABLE
+# include "../standard_ao_double_t.h"
+
+ /* NEC LE-IT: older AMD Opterons are missing this instruction. */
+ /* On these machines SIGILL will be thrown. */
+ /* Define AO_WEAK_DOUBLE_CAS_EMULATION to have an emulated (lock */
+ /* based) version available. */
+ /* HB: Changed this to not define either by default. There are */
+ /* enough machines and tool chains around on which cmpxchg16b */
+ /* doesn't work. And the emulation is unsafe by our usual rules. */
+ /* However both are clearly useful in certain cases. */
+ /* 128-bit CAS via lock cmpxchg16b: rdx:rax hold the expected pair, */
+ /* rcx:rbx the replacement pair; setz captures the success flag. */
+ AO_INLINE int
+ AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
+ AO_t old_val1, AO_t old_val2,
+ AO_t new_val1, AO_t new_val2)
+ {
+ char result;
+ __asm__ __volatile__("lock; cmpxchg16b %0; setz %1"
+ : "=m"(*addr), "=a"(result)
+ : "m"(*addr), "d" (old_val2), "a" (old_val1),
+ "c" (new_val2), "b" (new_val1)
+ : "memory");
+ return (int) result;
+ }
+# define AO_HAVE_compare_double_and_swap_double_full
+
+# elif defined(AO_WEAK_DOUBLE_CAS_EMULATION)
+# include "../standard_ao_double_t.h"
+
+ /* This one provides spinlock based emulation of CAS implemented in */
+ /* atomic_ops.c. We probably do not want to do this here, since it */
+ /* is not atomic with respect to other kinds of updates of *addr. */
+ /* On the other hand, this may be a useful facility on occasion. */
+ /* Out-of-line spinlock-based emulation, defined in atomic_ops.c. */
+ int AO_compare_double_and_swap_double_emulation(
+ volatile AO_double_t *addr,
+ AO_t old_val1, AO_t old_val2,
+ AO_t new_val1, AO_t new_val2);
+
+ /* Thin wrapper forwarding to the lock-based emulation; NOT atomic */
+ /* with respect to other kinds of updates of *addr (see above). */
+ AO_INLINE int
+ AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
+ AO_t old_val1, AO_t old_val2,
+ AO_t new_val1, AO_t new_val2)
+ {
+ return AO_compare_double_and_swap_double_emulation(addr,
+ old_val1, old_val2, new_val1, new_val2);
+ }
+# define AO_HAVE_compare_double_and_swap_double_full
+# endif /* AO_WEAK_DOUBLE_CAS_EMULATION && !AO_CMPXCHG16B_AVAILABLE */
+
+#endif /* x86_64 && !ILP32 */
+
+/* Real X86 implementations, except for some old 32-bit WinChips, */
+/* appear to enforce ordering between memory operations, EXCEPT that */
+/* a later read can pass earlier writes, presumably due to the visible */
+/* presence of store buffers. */
+/* We ignore both the WinChips and the fact that the official specs */
+/* seem to be much weaker (and arguably too weak to be usable). */
+#include "../ordered_except_wr.h"
--- /dev/null
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* The following is useful primarily for debugging and documentation. */
+/* We define various atomic operations by acquiring a global pthread */
+/* lock. The resulting implementation will perform poorly, but should */
+/* be correct unless it is used from signal handlers. */
+/* We assume that all pthread operations act like full memory barriers. */
+/* (We believe that is the intent of the specification.) */
+
+#include <pthread.h>
+
+#include "test_and_set_t_is_ao_t.h"
+ /* This is not necessarily compatible with the native */
+ /* implementation. But those can't be safely mixed anyway. */
+
+/* We define only the full barrier variants, and count on the */
+/* generalization section below to fill in the rest. */
+extern pthread_mutex_t AO_pt_lock;
+
+/* An empty lock/unlock round-trip on the global mutex; per the header */
+/* comment above, pthread operations are assumed to act as full memory */
+/* barriers, so this serves as AO_nop_full. */
+AO_INLINE void
+AO_nop_full(void)
+{
+  pthread_mutex_lock(&AO_pt_lock);
+  pthread_mutex_unlock(&AO_pt_lock);
+}
+#define AO_HAVE_nop_full
+
+/* Lock-protected load and store for AO_t and for each narrower width */
+/* (char, short, int); the mutex acquire/release supplies the barrier. */
+AO_INLINE AO_t
+AO_load_full(const volatile AO_t *addr)
+{
+  AO_t result;
+  pthread_mutex_lock(&AO_pt_lock);
+  result = *addr;
+  pthread_mutex_unlock(&AO_pt_lock);
+  return result;
+}
+#define AO_HAVE_load_full
+
+AO_INLINE void
+AO_store_full(volatile AO_t *addr, AO_t val)
+{
+  pthread_mutex_lock(&AO_pt_lock);
+  *addr = val;
+  pthread_mutex_unlock(&AO_pt_lock);
+}
+#define AO_HAVE_store_full
+
+AO_INLINE unsigned char
+AO_char_load_full(const volatile unsigned char *addr)
+{
+  unsigned char result;
+  pthread_mutex_lock(&AO_pt_lock);
+  result = *addr;
+  pthread_mutex_unlock(&AO_pt_lock);
+  return result;
+}
+#define AO_HAVE_char_load_full
+
+AO_INLINE void
+AO_char_store_full(volatile unsigned char *addr, unsigned char val)
+{
+  pthread_mutex_lock(&AO_pt_lock);
+  *addr = val;
+  pthread_mutex_unlock(&AO_pt_lock);
+}
+#define AO_HAVE_char_store_full
+
+AO_INLINE unsigned short
+AO_short_load_full(const volatile unsigned short *addr)
+{
+  unsigned short result;
+  pthread_mutex_lock(&AO_pt_lock);
+  result = *addr;
+  pthread_mutex_unlock(&AO_pt_lock);
+  return result;
+}
+#define AO_HAVE_short_load_full
+
+AO_INLINE void
+AO_short_store_full(volatile unsigned short *addr, unsigned short val)
+{
+  pthread_mutex_lock(&AO_pt_lock);
+  *addr = val;
+  pthread_mutex_unlock(&AO_pt_lock);
+}
+#define AO_HAVE_short_store_full
+
+AO_INLINE unsigned int
+AO_int_load_full(const volatile unsigned int *addr)
+{
+  unsigned int result;
+  pthread_mutex_lock(&AO_pt_lock);
+  result = *addr;
+  pthread_mutex_unlock(&AO_pt_lock);
+  return result;
+}
+#define AO_HAVE_int_load_full
+
+AO_INLINE void
+AO_int_store_full(volatile unsigned int *addr, unsigned int val)
+{
+  pthread_mutex_lock(&AO_pt_lock);
+  *addr = val;
+  pthread_mutex_unlock(&AO_pt_lock);
+}
+#define AO_HAVE_int_store_full
+
+/* Lock-protected test-and-set: fetch the old TS value and mark the */
+/* location set. The assert checks the location held a valid TS value. */
+/* NOTE(review): relies on assert() having been declared by an earlier */
+/* include -- confirm <assert.h> is pulled in before this header. */
+AO_INLINE AO_TS_VAL_t
+AO_test_and_set_full(volatile AO_TS_t *addr)
+{
+  AO_TS_VAL_t result;
+  pthread_mutex_lock(&AO_pt_lock);
+  result = (AO_TS_VAL_t)(*addr);
+  *addr = AO_TS_SET;
+  pthread_mutex_unlock(&AO_pt_lock);
+  assert(result == AO_TS_SET || result == AO_TS_CLEAR);
+  return result;
+}
+#define AO_HAVE_test_and_set_full
+
+/* Lock-protected fetch-and-add for each width; each returns the value */
+/* held at p BEFORE the addition. */
+AO_INLINE AO_t
+AO_fetch_and_add_full(volatile AO_t *p, AO_t incr)
+{
+  AO_t old_val;
+
+  pthread_mutex_lock(&AO_pt_lock);
+  old_val = *p;
+  *p = old_val + incr;
+  pthread_mutex_unlock(&AO_pt_lock);
+  return old_val;
+}
+#define AO_HAVE_fetch_and_add_full
+
+AO_INLINE unsigned char
+AO_char_fetch_and_add_full(volatile unsigned char *p, unsigned char incr)
+{
+  unsigned char old_val;
+
+  pthread_mutex_lock(&AO_pt_lock);
+  old_val = *p;
+  *p = old_val + incr;
+  pthread_mutex_unlock(&AO_pt_lock);
+  return old_val;
+}
+#define AO_HAVE_char_fetch_and_add_full
+
+AO_INLINE unsigned short
+AO_short_fetch_and_add_full(volatile unsigned short *p, unsigned short incr)
+{
+  unsigned short old_val;
+
+  pthread_mutex_lock(&AO_pt_lock);
+  old_val = *p;
+  *p = old_val + incr;
+  pthread_mutex_unlock(&AO_pt_lock);
+  return old_val;
+}
+#define AO_HAVE_short_fetch_and_add_full
+
+AO_INLINE unsigned int
+AO_int_fetch_and_add_full(volatile unsigned int *p, unsigned int incr)
+{
+  unsigned int old_val;
+
+  pthread_mutex_lock(&AO_pt_lock);
+  old_val = *p;
+  *p = old_val + incr;
+  pthread_mutex_unlock(&AO_pt_lock);
+  return old_val;
+}
+#define AO_HAVE_int_fetch_and_add_full
+
+/* Lock-protected in-place bitwise and/or/xor for each width; no value */
+/* is returned. */
+AO_INLINE void
+AO_and_full(volatile AO_t *p, AO_t value)
+{
+  pthread_mutex_lock(&AO_pt_lock);
+  *p &= value;
+  pthread_mutex_unlock(&AO_pt_lock);
+}
+#define AO_HAVE_and_full
+
+AO_INLINE void
+AO_or_full(volatile AO_t *p, AO_t value)
+{
+  pthread_mutex_lock(&AO_pt_lock);
+  *p |= value;
+  pthread_mutex_unlock(&AO_pt_lock);
+}
+#define AO_HAVE_or_full
+
+AO_INLINE void
+AO_xor_full(volatile AO_t *p, AO_t value)
+{
+  pthread_mutex_lock(&AO_pt_lock);
+  *p ^= value;
+  pthread_mutex_unlock(&AO_pt_lock);
+}
+#define AO_HAVE_xor_full
+
+AO_INLINE void
+AO_char_and_full(volatile unsigned char *p, unsigned char value)
+{
+  pthread_mutex_lock(&AO_pt_lock);
+  *p &= value;
+  pthread_mutex_unlock(&AO_pt_lock);
+}
+#define AO_HAVE_char_and_full
+
+AO_INLINE void
+AO_char_or_full(volatile unsigned char *p, unsigned char value)
+{
+  pthread_mutex_lock(&AO_pt_lock);
+  *p |= value;
+  pthread_mutex_unlock(&AO_pt_lock);
+}
+#define AO_HAVE_char_or_full
+
+AO_INLINE void
+AO_char_xor_full(volatile unsigned char *p, unsigned char value)
+{
+  pthread_mutex_lock(&AO_pt_lock);
+  *p ^= value;
+  pthread_mutex_unlock(&AO_pt_lock);
+}
+#define AO_HAVE_char_xor_full
+
+AO_INLINE void
+AO_short_and_full(volatile unsigned short *p, unsigned short value)
+{
+  pthread_mutex_lock(&AO_pt_lock);
+  *p &= value;
+  pthread_mutex_unlock(&AO_pt_lock);
+}
+#define AO_HAVE_short_and_full
+
+AO_INLINE void
+AO_short_or_full(volatile unsigned short *p, unsigned short value)
+{
+  pthread_mutex_lock(&AO_pt_lock);
+  *p |= value;
+  pthread_mutex_unlock(&AO_pt_lock);
+}
+#define AO_HAVE_short_or_full
+
+AO_INLINE void
+AO_short_xor_full(volatile unsigned short *p, unsigned short value)
+{
+  pthread_mutex_lock(&AO_pt_lock);
+  *p ^= value;
+  pthread_mutex_unlock(&AO_pt_lock);
+}
+#define AO_HAVE_short_xor_full
+
+AO_INLINE void
+AO_int_and_full(volatile unsigned *p, unsigned value)
+{
+  pthread_mutex_lock(&AO_pt_lock);
+  *p &= value;
+  pthread_mutex_unlock(&AO_pt_lock);
+}
+#define AO_HAVE_int_and_full
+
+AO_INLINE void
+AO_int_or_full(volatile unsigned *p, unsigned value)
+{
+  pthread_mutex_lock(&AO_pt_lock);
+  *p |= value;
+  pthread_mutex_unlock(&AO_pt_lock);
+}
+#define AO_HAVE_int_or_full
+
+AO_INLINE void
+AO_int_xor_full(volatile unsigned *p, unsigned value)
+{
+  pthread_mutex_lock(&AO_pt_lock);
+  *p ^= value;
+  pthread_mutex_unlock(&AO_pt_lock);
+}
+#define AO_HAVE_int_xor_full
+
+/* Lock-protected fetch-compare-and-swap for each width: store new_val */
+/* only if the current value equals old_val; always return the fetched */
+/* (pre-operation) value so the caller can detect success. */
+AO_INLINE AO_t
+AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old_val,
+ AO_t new_val)
+{
+  AO_t fetched_val;
+
+  pthread_mutex_lock(&AO_pt_lock);
+  fetched_val = *addr;
+  if (fetched_val == old_val)
+    *addr = new_val;
+  pthread_mutex_unlock(&AO_pt_lock);
+  return fetched_val;
+}
+#define AO_HAVE_fetch_compare_and_swap_full
+
+AO_INLINE unsigned char
+AO_char_fetch_compare_and_swap_full(volatile unsigned char *addr,
+ unsigned char old_val,
+ unsigned char new_val)
+{
+  unsigned char fetched_val;
+
+  pthread_mutex_lock(&AO_pt_lock);
+  fetched_val = *addr;
+  if (fetched_val == old_val)
+    *addr = new_val;
+  pthread_mutex_unlock(&AO_pt_lock);
+  return fetched_val;
+}
+#define AO_HAVE_char_fetch_compare_and_swap_full
+
+AO_INLINE unsigned short
+AO_short_fetch_compare_and_swap_full(volatile unsigned short *addr,
+ unsigned short old_val,
+ unsigned short new_val)
+{
+  unsigned short fetched_val;
+
+  pthread_mutex_lock(&AO_pt_lock);
+  fetched_val = *addr;
+  if (fetched_val == old_val)
+    *addr = new_val;
+  pthread_mutex_unlock(&AO_pt_lock);
+  return fetched_val;
+}
+#define AO_HAVE_short_fetch_compare_and_swap_full
+
+AO_INLINE unsigned
+AO_int_fetch_compare_and_swap_full(volatile unsigned *addr, unsigned old_val,
+ unsigned new_val)
+{
+  unsigned fetched_val;
+
+  pthread_mutex_lock(&AO_pt_lock);
+  fetched_val = *addr;
+  if (fetched_val == old_val)
+    *addr = new_val;
+  pthread_mutex_unlock(&AO_pt_lock);
+  return fetched_val;
+}
+#define AO_HAVE_int_fetch_compare_and_swap_full
+
+/* Unlike real architectures, we define both double-width CAS variants. */
+
+/* Double-width value as an explicit pair of AO_t halves; the lock */
+/* makes atomicity possible without hardware double-wide support. */
+typedef struct {
+  AO_t AO_val1;
+  AO_t AO_val2;
+} AO_double_t;
+#define AO_HAVE_double_t
+
+/* Static initializer zeroing both halves. */
+#define AO_DOUBLE_T_INITIALIZER { (AO_t)0, (AO_t)0 }
+
+/* Lock-protected double-wide load: both halves are read under the */
+/* same critical section, so the pair is observed consistently. */
+AO_INLINE AO_double_t
+AO_double_load_full(const volatile AO_double_t *addr)
+{
+  AO_double_t result;
+
+  pthread_mutex_lock(&AO_pt_lock);
+  result.AO_val1 = addr->AO_val1;
+  result.AO_val2 = addr->AO_val2;
+  pthread_mutex_unlock(&AO_pt_lock);
+  return result;
+}
+#define AO_HAVE_double_load_full
+
+/* Lock-protected double-wide store: both halves updated atomically */
+/* with respect to the other lock-based operations in this file. */
+AO_INLINE void
+AO_double_store_full(volatile AO_double_t *addr, AO_double_t value)
+{
+  pthread_mutex_lock(&AO_pt_lock);
+  addr->AO_val1 = value.AO_val1;
+  addr->AO_val2 = value.AO_val2;
+  pthread_mutex_unlock(&AO_pt_lock);
+}
+#define AO_HAVE_double_store_full
+
+/* Double-wide CAS: atomically replace (AO_val1, AO_val2) with */
+/* (new1, new2) iff both halves match (old1, old2). Returns nonzero */
+/* on success. Serialized by the global pthread lock, with a single */
+/* unlock point on every path. */
+AO_INLINE int
+AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
+                                       AO_t old1, AO_t old2,
+                                       AO_t new1, AO_t new2)
+{
+  int swapped = 0;
+
+  pthread_mutex_lock(&AO_pt_lock);
+  if (addr->AO_val1 == old1 && addr->AO_val2 == old2) {
+    addr->AO_val1 = new1;
+    addr->AO_val2 = new2;
+    swapped = 1;
+  }
+  pthread_mutex_unlock(&AO_pt_lock);
+  return swapped;
+}
+#define AO_HAVE_compare_double_and_swap_double_full
+
+/* Single-guard double CAS: succeeds iff AO_val1 alone matches old1; */
+/* on success stores new1 and new2. Returns nonzero on success. */
+/* Serialized by the global pthread lock, single unlock point. */
+AO_INLINE int
+AO_compare_and_swap_double_full(volatile AO_double_t *addr,
+                                AO_t old1, AO_t new1, AO_t new2)
+{
+  int swapped = 0;
+
+  pthread_mutex_lock(&AO_pt_lock);
+  if (addr->AO_val1 == old1) {
+    addr->AO_val1 = new1;
+    addr->AO_val2 = new2;
+    swapped = 1;
+  }
+  pthread_mutex_unlock(&AO_pt_lock);
+  return swapped;
+}
+#define AO_HAVE_compare_and_swap_double_full
+
+/* We can't use hardware loads and stores, since they don't */
+/* interact correctly with atomic updates. */
--- /dev/null
+/*
+ * Copyright (c) 2003 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Derived from the corresponding header file for gcc.
+ */
+
+#include "../loadstore/atomic_load.h"
+#include "../loadstore/atomic_store.h"
+
+/* Some architecture set descriptions include special "ordered" memory */
+/* operations. As far as we can tell, no existing processors actually */
+/* require those. Nor does it appear likely that future processors */
+/* will. */
+/* FIXME: The PA emulator on Itanium may obey weaker restrictions. */
+/* There should be a mode in which we don't assume sequential */
+/* consistency here. */
+#include "../ordered.h"
+
+#include <machine/inline.h>
+
+/* GCC will not guarantee the alignment we need, use four lock words */
+/* and select the correctly aligned datum. See the glibc 2.3.2 */
+/* linuxthread port for the original implementation. */
+/* Over-sized TS location: four ints guarantee one of them lies on the */
+/* 16-byte boundary required by ldcw (see the alignment note below). */
+struct AO_pa_clearable_loc {
+  int data[4];
+};
+
+#undef AO_TS_INITIALIZER
+#define AO_TS_t struct AO_pa_clearable_loc
+#define AO_TS_INITIALIZER {1,1,1,1}
+/* Switch meaning of set and clear, since we only have an atomic clear */
+/* instruction. */
+typedef enum {AO_PA_TS_set = 0, AO_PA_TS_clear = 1} AO_PA_TS_val;
+#define AO_TS_VAL_t AO_PA_TS_val
+#define AO_TS_CLEAR AO_PA_TS_clear
+#define AO_TS_SET AO_PA_TS_set
+
+/* The hppa only has one atomic read and modify memory operation, */
+/* load and clear, so hppa spinlocks must use zero to signify that */
+/* someone is holding the lock. The address used for the ldcw */
+/* semaphore must be 16-byte aligned. */
+#define AO_ldcw(a, ret) \
+ _LDCWX(0 /* index */, 0 /* s */, a /* base */, ret)
+
+/* Because malloc only guarantees 8-byte alignment for malloc'd data, */
+/* and GCC only guarantees 8-byte alignment for stack locals, we can't */
+/* be assured of 16-byte alignment for atomic lock data even if we */
+/* specify "__attribute ((aligned(16)))" in the type declaration. So, */
+/* we use a struct containing an array of four ints for the atomic lock */
+/* type and dynamically select the 16-byte aligned int from the array */
+/* for the semaphore. */
+#define AO_PA_LDCW_ALIGNMENT 16
+#define AO_ldcw_align(addr) \
+ ((volatile unsigned *)(((unsigned long)(addr) \
+ + (AO_PA_LDCW_ALIGNMENT - 1)) \
+ & ~(AO_PA_LDCW_ALIGNMENT - 1)))
+
+/* Works on PA 1.1 and PA 2.0 systems */
+AO_INLINE AO_TS_VAL_t
+AO_test_and_set_full(volatile AO_TS_t * addr)
+{
+  register unsigned int ret;
+  /* Locate the 16-byte aligned lock word inside addr->data.          */
+  register unsigned long a = (unsigned long)AO_ldcw_align(addr);
+
+  /* ldcw atomically loads the old word and stores zero into it.      */
+  AO_ldcw(a, ret);
+  /* Old value 1 (AO_PA_TS_clear) means we acquired the lock; 0       */
+  /* (AO_PA_TS_set) means it was already held - see the enum above.   */
+  return (AO_TS_VAL_t)ret;
+}
+#define AO_HAVE_test_and_set_full
+
+/* Release the lock word: store the "clear" value (1, since ldcw can  */
+/* only atomically write 0) into the 16-byte aligned semaphore word.  */
+AO_INLINE void
+AO_pa_clear(volatile AO_TS_t * addr)
+{
+  volatile unsigned *sem = AO_ldcw_align(addr);
+
+  /* Prevent the compiler from sinking earlier accesses below the     */
+  /* clearing store; the store itself is a plain volatile write.      */
+  AO_compiler_barrier();
+  *sem = 1;
+}
+#define AO_CLEAR(addr) AO_pa_clear(addr)
--- /dev/null
+/*
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * This file specifies Itanium primitives for use with the HP compiler
+ * under HP-UX.  We use intrinsics instead of the inline assembly code in the
+ * gcc file.
+ */
+
+#include "../all_atomic_load_store.h"
+
+#include "../all_acquire_release_volatile.h"
+
+#include "../test_and_set_t_is_char.h"
+
+#include <machine/sys/inline.h>
+
+#ifdef __LP64__
+# define AO_T_FASIZE _FASZ_D
+# define AO_T_SIZE _SZ_D
+#else
+# define AO_T_FASIZE _FASZ_W
+# define AO_T_SIZE _SZ_W
+#endif
+
+/* Full memory barrier: the Itanium "mf" instruction.  */
+AO_INLINE void
+AO_nop_full(void)
+{
+  _Asm_mf();
+}
+#define AO_HAVE_nop_full
+
+#ifndef AO_PREFER_GENERALIZED
+/* Atomic fetch-and-add of +/-1 via the fetchadd instruction; each    */
+/* returns the value previously at *p.  AO_T_FASIZE selects a 4- or   */
+/* 8-byte operand to match AO_t.  The hardware ordering comes from    */
+/* the .acq/.rel completer (_SEM_ACQ/_SEM_REL); the _UP/_DOWN fence   */
+/* arguments presumably constrain compiler code motion around the     */
+/* intrinsic per HP inline.h conventions - TODO confirm against the   */
+/* HP aCC intrinsics documentation.                                   */
+AO_INLINE AO_t
+AO_fetch_and_add1_acquire (volatile AO_t *p)
+{
+  return _Asm_fetchadd(AO_T_FASIZE, _SEM_ACQ, p, 1,
+                       _LDHINT_NONE, _DOWN_MEM_FENCE);
+}
+#define AO_HAVE_fetch_and_add1_acquire
+
+AO_INLINE AO_t
+AO_fetch_and_add1_release (volatile AO_t *p)
+{
+  return _Asm_fetchadd(AO_T_FASIZE, _SEM_REL, p, 1,
+                       _LDHINT_NONE, _UP_MEM_FENCE);
+}
+#define AO_HAVE_fetch_and_add1_release
+
+AO_INLINE AO_t
+AO_fetch_and_sub1_acquire (volatile AO_t *p)
+{
+  return _Asm_fetchadd(AO_T_FASIZE, _SEM_ACQ, p, -1,
+                       _LDHINT_NONE, _DOWN_MEM_FENCE);
+}
+#define AO_HAVE_fetch_and_sub1_acquire
+
+AO_INLINE AO_t
+AO_fetch_and_sub1_release (volatile AO_t *p)
+{
+  return _Asm_fetchadd(AO_T_FASIZE, _SEM_REL, p, -1,
+                       _LDHINT_NONE, _UP_MEM_FENCE);
+}
+#define AO_HAVE_fetch_and_sub1_release
+#endif /* !AO_PREFER_GENERALIZED */
+
+/* CAS: the comparand must be placed in the ar.ccv application        */
+/* register before cmpxchg executes; the intrinsic returns the value  */
+/* that was in *addr (equal to old_val iff the swap succeeded).       */
+AO_INLINE AO_t
+AO_fetch_compare_and_swap_acquire(volatile AO_t *addr, AO_t old_val,
+                                  AO_t new_val)
+{
+  _Asm_mov_to_ar(_AREG_CCV, old_val, _DOWN_MEM_FENCE);
+  return _Asm_cmpxchg(AO_T_SIZE, _SEM_ACQ, addr,
+                      new_val, _LDHINT_NONE, _DOWN_MEM_FENCE);
+}
+#define AO_HAVE_fetch_compare_and_swap_acquire
+
+AO_INLINE AO_t
+AO_fetch_compare_and_swap_release(volatile AO_t *addr, AO_t old_val,
+                                  AO_t new_val)
+{
+  _Asm_mov_to_ar(_AREG_CCV, old_val, _UP_MEM_FENCE);
+  return _Asm_cmpxchg(AO_T_SIZE, _SEM_REL, addr,
+                      new_val, _LDHINT_NONE, _UP_MEM_FENCE);
+
+}
+#define AO_HAVE_fetch_compare_and_swap_release
+
+/* Byte-wide (_SZ_B) variants of the CAS above; return the previous   */
+/* byte at *addr.                                                     */
+AO_INLINE unsigned char
+AO_char_fetch_compare_and_swap_acquire(volatile unsigned char *addr,
+                unsigned char old_val, unsigned char new_val)
+{
+  _Asm_mov_to_ar(_AREG_CCV, old_val, _DOWN_MEM_FENCE);
+  return _Asm_cmpxchg(_SZ_B, _SEM_ACQ, addr,
+                      new_val, _LDHINT_NONE, _DOWN_MEM_FENCE);
+
+}
+#define AO_HAVE_char_fetch_compare_and_swap_acquire
+
+AO_INLINE unsigned char
+AO_char_fetch_compare_and_swap_release(volatile unsigned char *addr,
+                unsigned char old_val, unsigned char new_val)
+{
+  _Asm_mov_to_ar(_AREG_CCV, old_val, _UP_MEM_FENCE);
+  return _Asm_cmpxchg(_SZ_B, _SEM_REL, addr,
+                      new_val, _LDHINT_NONE, _UP_MEM_FENCE);
+
+}
+#define AO_HAVE_char_fetch_compare_and_swap_release
+
+/* 16-bit CAS: put the comparand in ar.ccv, then cmpxchg2.acq.        */
+/* Returns the value previously at *addr (== old_val on success).     */
+/* BUG FIX: the size code must be _SZ_H (halfword) for an unsigned    */
+/* short operand; the original _SZ_B (copied from the char variant)   */
+/* would compare-and-swap only a single byte.                         */
+AO_INLINE unsigned short
+AO_short_fetch_compare_and_swap_acquire(volatile unsigned short *addr,
+                                        unsigned short old_val,
+                                        unsigned short new_val)
+{
+  _Asm_mov_to_ar(_AREG_CCV, old_val, _DOWN_MEM_FENCE);
+  return _Asm_cmpxchg(_SZ_H, _SEM_ACQ, addr,
+                      new_val, _LDHINT_NONE, _DOWN_MEM_FENCE);
+}
+#define AO_HAVE_short_fetch_compare_and_swap_acquire
+
+/* 16-bit CAS with release semantics (cmpxchg2.rel); returns the      */
+/* value previously at *addr (== old_val on success).                 */
+/* BUG FIX: use _SZ_H (halfword), not _SZ_B - the byte size code      */
+/* here was a copy-paste from the char variant and would only swap    */
+/* the low byte of the short.                                         */
+AO_INLINE unsigned short
+AO_short_fetch_compare_and_swap_release(volatile unsigned short *addr,
+                                        unsigned short old_val,
+                                        unsigned short new_val)
+{
+  _Asm_mov_to_ar(_AREG_CCV, old_val, _UP_MEM_FENCE);
+  return _Asm_cmpxchg(_SZ_H, _SEM_REL, addr,
+                      new_val, _LDHINT_NONE, _UP_MEM_FENCE);
+}
+#define AO_HAVE_short_fetch_compare_and_swap_release
+
+#ifndef __LP64__
+# define AO_T_IS_INT
+#endif
--- /dev/null
+/* FIXME. This is only a placeholder for the AIX compiler. */
+/* It doesn't work. Please send a patch. */
+/* Memory model documented at http://www-106.ibm.com/developerworks/ */
+/* eserver/articles/archguide.html and (clearer) */
+/* http://www-106.ibm.com/developerworks/eserver/articles/powerpc.html. */
+/* There appears to be no implicit ordering between any kind of */
+/* independent memory references. */
+/* Architecture enforces some ordering based on control dependence. */
+/* I don't know if that could help. */
+/* Data-dependent loads are always ordered. */
+/* Based on the above references, eieio is intended for use on */
+/* uncached memory, which we don't support. It does not order loads */
+/* from cached memory. */
+/* Thanks to Maged Michael, Doug Lea, and Roger Hoover for helping to */
+/* track some of this down and correcting my misunderstandings. -HB */
+
+#include "../all_aligned_atomic_load_store.h"
+
+void AO_sync(void);
+#pragma mc_func AO_sync { "7c0004ac" }
+
+#ifdef __NO_LWSYNC__
+# define AO_lwsync AO_sync
+#else
+ void AO_lwsync(void);
+#pragma mc_func AO_lwsync { "7c2004ac" }
+#endif
+
+#define AO_nop_write() AO_lwsync()
+#define AO_HAVE_nop_write
+
+#define AO_nop_read() AO_lwsync()
+#define AO_HAVE_nop_read
+
+/* We explicitly specify load_acquire and store_release, since these */
+/* rely on the fact that lwsync is also a LoadStore barrier. */
+AO_INLINE AO_t
+AO_load_acquire(const volatile AO_t *addr)
+{
+  AO_t result = *addr;
+  /* lwsync after the load keeps it ordered before all later accesses */
+  /* (lwsync is a LoadLoad + LoadStore + StoreStore barrier).         */
+  AO_lwsync();
+  return result;
+}
+#define AO_HAVE_load_acquire
+
+AO_INLINE void
+AO_store_release(volatile AO_t *addr, AO_t value)
+{
+  /* lwsync before the store orders all earlier accesses before it.   */
+  AO_lwsync();
+  *addr = value;
+}
+#define AO_HAVE_store_release
+
+#ifndef AO_PREFER_GENERALIZED
+/* This is similar to the code in the garbage collector.  Deleting    */
+/* this and having it synthesized from compare_and_swap would         */
+/* probably only cost us a load immediate instruction.                */
+/*AO_INLINE AO_TS_VAL_t
+AO_test_and_set(volatile AO_TS_t *addr) {
+# error FIXME Implement me
+}
+#define AO_HAVE_test_and_set*/
+
+/* NOTE(review): the wrappers below call AO_test_and_set, which is    */
+/* still only the commented-out placeholder above; this whole file is */
+/* a non-working stub for the AIX compiler (see FIXME at the top).    */
+AO_INLINE AO_TS_VAL_t
+AO_test_and_set_acquire(volatile AO_TS_t *addr) {
+  AO_TS_VAL_t result = AO_test_and_set(addr);
+  /* Barrier after: later accesses cannot move before the TAS.        */
+  AO_lwsync();
+  return result;
+}
+#define AO_HAVE_test_and_set_acquire
+
+AO_INLINE AO_TS_VAL_t
+AO_test_and_set_release(volatile AO_TS_t *addr) {
+  /* Barrier before: earlier accesses complete before the TAS.        */
+  AO_lwsync();
+  return AO_test_and_set(addr);
+}
+#define AO_HAVE_test_and_set_release
+
+AO_INLINE AO_TS_VAL_t
+AO_test_and_set_full(volatile AO_TS_t *addr) {
+  AO_TS_VAL_t result;
+  /* Barriers on both sides approximate full ordering.                */
+  AO_lwsync();
+  result = AO_test_and_set(addr);
+  AO_lwsync();
+  return result;
+}
+#define AO_HAVE_test_and_set_full
+#endif /* !AO_PREFER_GENERALIZED */
+
+/*AO_INLINE AO_t
+AO_fetch_compare_and_swap(volatile AO_t *addr, AO_t old_val, AO_t new_val)
+{
+# error FIXME Implement me
+}
+#define AO_HAVE_fetch_compare_and_swap*/
+
+/* NOTE(review): these wrappers rely on AO_fetch_compare_and_swap,    */
+/* which is only a commented-out placeholder above, so this stub file */
+/* does not yet provide a working CAS (see the FIXME at the top).     */
+AO_INLINE AO_t
+AO_fetch_compare_and_swap_acquire(volatile AO_t *addr, AO_t old_val,
+                                  AO_t new_val)
+{
+  AO_t result = AO_fetch_compare_and_swap(addr, old_val, new_val);
+  /* Barrier after: gives the acquire half of the ordering.           */
+  AO_lwsync();
+  return result;
+}
+#define AO_HAVE_fetch_compare_and_swap_acquire
+
+AO_INLINE AO_t
+AO_fetch_compare_and_swap_release(volatile AO_t *addr, AO_t old_val,
+                                  AO_t new_val)
+{
+  /* Barrier before: gives the release half of the ordering.          */
+  AO_lwsync();
+  return AO_fetch_compare_and_swap(addr, old_val, new_val);
+}
+#define AO_HAVE_fetch_compare_and_swap_release
+
+AO_INLINE AO_t
+AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old_val,
+                               AO_t new_val)
+{
+  AO_t result;
+  /* Barriers on both sides approximate full ordering.                */
+  AO_lwsync();
+  result = AO_fetch_compare_and_swap(addr, old_val, new_val);
+  AO_lwsync();
+  return result;
+}
+#define AO_HAVE_fetch_compare_and_swap_full
+
+/* TODO: Implement AO_fetch_and_add, AO_and/or/xor directly. */
--- /dev/null
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * This file specifies Itanium primitives for use with the Intel (ecc)
+ * compiler. We use intrinsics instead of the inline assembly code in the
+ * gcc file.
+ */
+
+#include "../all_atomic_load_store.h"
+
+#include "../test_and_set_t_is_char.h"
+
+#include <ia64intrin.h>
+
+/* The acquire release semantics of volatile can be turned off. And volatile */
+/* operations in icc9 don't imply ordering with respect to other nonvolatile */
+/* operations. */
+
+#define AO_INTEL_PTR_t void *
+
+AO_INLINE AO_t
+AO_load_acquire(const volatile AO_t *p)
+{
+  /* Explicit 8-byte acquire load via the __ld8_acq intrinsic.        */
+  return (AO_t)(__ld8_acq((AO_INTEL_PTR_t)p));
+}
+#define AO_HAVE_load_acquire
+
+AO_INLINE void
+AO_store_release(volatile AO_t *p, AO_t val)
+{
+  /* Explicit 8-byte release store via the __st8_rel intrinsic.       */
+  __st8_rel((AO_INTEL_PTR_t)p, (__int64)val);
+}
+#define AO_HAVE_store_release
+
+AO_INLINE unsigned char
+AO_char_load_acquire(const volatile unsigned char *p)
+{
+  /* Explicit 1-byte acquire load (ld1.acq).                          */
+  return (__ld1_acq((AO_INTEL_PTR_t)p));
+}
+#define AO_HAVE_char_load_acquire
+
+AO_INLINE void
+AO_char_store_release(volatile unsigned char *p, unsigned char val)
+{
+  /* Explicit 1-byte release store (st1.rel).                         */
+  __st1_rel((AO_INTEL_PTR_t)p, val);
+}
+#define AO_HAVE_char_store_release
+
+AO_INLINE unsigned short
+AO_short_load_acquire(const volatile unsigned short *p)
+{
+  /* Explicit 2-byte acquire load (ld2.acq).                          */
+  return (__ld2_acq((AO_INTEL_PTR_t)p));
+}
+#define AO_HAVE_short_load_acquire
+
+AO_INLINE void
+AO_short_store_release(volatile unsigned short *p, unsigned short val)
+{
+  /* Explicit 2-byte release store (st2.rel).                         */
+  __st2_rel((AO_INTEL_PTR_t)p, val);
+}
+#define AO_HAVE_short_store_release
+
+AO_INLINE unsigned int
+AO_int_load_acquire(const volatile unsigned int *p)
+{
+  /* Explicit 4-byte acquire load (ld4.acq).                          */
+  return (__ld4_acq((AO_INTEL_PTR_t)p));
+}
+#define AO_HAVE_int_load_acquire
+
+AO_INLINE void
+AO_int_store_release(volatile unsigned int *p, unsigned int val)
+{
+  /* Explicit 4-byte release store (st4.rel).                         */
+  __st4_rel((AO_INTEL_PTR_t)p, val);
+}
+#define AO_HAVE_int_store_release
+
+AO_INLINE void
+AO_nop_full(void)
+{
+  /* Full memory fence (the "mf" instruction).                        */
+  __mf();
+}
+#define AO_HAVE_nop_full
+
+#ifndef AO_PREFER_GENERALIZED
+/* fetchadd8 with acquire/release completers; each intrinsic returns  */
+/* the value previously at *p.                                        */
+AO_INLINE AO_t
+AO_fetch_and_add1_acquire(volatile AO_t *p)
+{
+  return __fetchadd8_acq((unsigned __int64 *)p, 1);
+}
+#define AO_HAVE_fetch_and_add1_acquire
+
+AO_INLINE AO_t
+AO_fetch_and_add1_release(volatile AO_t *p)
+{
+  return __fetchadd8_rel((unsigned __int64 *)p, 1);
+}
+#define AO_HAVE_fetch_and_add1_release
+
+AO_INLINE AO_t
+AO_fetch_and_sub1_acquire(volatile AO_t *p)
+{
+  return __fetchadd8_acq((unsigned __int64 *)p, -1);
+}
+#define AO_HAVE_fetch_and_sub1_acquire
+
+AO_INLINE AO_t
+AO_fetch_and_sub1_release(volatile AO_t *p)
+{
+  return __fetchadd8_rel((unsigned __int64 *)p, -1);
+}
+#define AO_HAVE_fetch_and_sub1_release
+#endif /* !AO_PREFER_GENERALIZED */
+
+/* CAS via the Intel Interlocked intrinsics.  Note the argument       */
+/* order: (destination, exchange = new_val, comparand = old_val).     */
+/* Each returns the value previously at *addr (== old_val iff the     */
+/* swap succeeded).                                                   */
+AO_INLINE AO_t
+AO_fetch_compare_and_swap_acquire(volatile AO_t *addr, AO_t old_val,
+                                  AO_t new_val)
+{
+  return _InterlockedCompareExchange64_acq(addr, new_val, old_val);
+}
+#define AO_HAVE_fetch_compare_and_swap_acquire
+
+AO_INLINE AO_t
+AO_fetch_compare_and_swap_release(volatile AO_t *addr, AO_t old_val,
+                                  AO_t new_val)
+{
+  return _InterlockedCompareExchange64_rel(addr, new_val, old_val);
+}
+#define AO_HAVE_fetch_compare_and_swap_release
+
+AO_INLINE unsigned char
+AO_char_fetch_compare_and_swap_acquire(volatile unsigned char *addr,
+                                       unsigned char old_val,
+                                       unsigned char new_val)
+{
+  return _InterlockedCompareExchange8_acq(addr, new_val, old_val);
+}
+#define AO_HAVE_char_fetch_compare_and_swap_acquire
+
+AO_INLINE unsigned char
+AO_char_fetch_compare_and_swap_release(volatile unsigned char *addr,
+                                       unsigned char old_val,
+                                       unsigned char new_val)
+{
+  return _InterlockedCompareExchange8_rel(addr, new_val, old_val);
+}
+#define AO_HAVE_char_fetch_compare_and_swap_release
+
+AO_INLINE unsigned short
+AO_short_fetch_compare_and_swap_acquire(volatile unsigned short *addr,
+                                        unsigned short old_val,
+                                        unsigned short new_val)
+{
+  return _InterlockedCompareExchange16_acq(addr, new_val, old_val);
+}
+#define AO_HAVE_short_fetch_compare_and_swap_acquire
+
+AO_INLINE unsigned short
+AO_short_fetch_compare_and_swap_release(volatile unsigned short *addr,
+                                        unsigned short old_val,
+                                        unsigned short new_val)
+{
+  return _InterlockedCompareExchange16_rel(addr, new_val, old_val);
+}
+#define AO_HAVE_short_fetch_compare_and_swap_release
+
+AO_INLINE unsigned int
+AO_int_fetch_compare_and_swap_acquire(volatile unsigned int *addr,
+                                      unsigned int old_val,
+                                      unsigned int new_val)
+{
+  return _InterlockedCompareExchange_acq(addr, new_val, old_val);
+}
+#define AO_HAVE_int_fetch_compare_and_swap_acquire
+
+AO_INLINE unsigned int
+AO_int_fetch_compare_and_swap_release(volatile unsigned int *addr,
+                                      unsigned int old_val,
+                                      unsigned int new_val)
+{
+  return _InterlockedCompareExchange_rel(addr, new_val, old_val);
+}
+#define AO_HAVE_int_fetch_compare_and_swap_release
--- /dev/null
+/*
+ * Copyright (c) 2003-2004 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* This file adds definitions appropriate for environments in which */
+/* volatile load of a given type has acquire semantics, and volatile */
+/* store of a given type has release semantics. This is arguably */
+/* supposed to be true with the standard Itanium software conventions. */
+/* Empirically gcc/ia64 does some reordering of ordinary operations */
+/* around volatiles even when we think it should not. GCC v3.3 and */
+/* earlier could reorder a volatile store with another store. As of */
+/* March 2005, gcc pre-4 reuses some previously computed common */
+/* subexpressions across a volatile load; hence, we now add compiler */
+/* barriers for gcc. */
+
+#ifndef AO_GCC_BARRIER
+ /* TODO: Check GCC version (if workaround not needed for modern GCC). */
+# if defined(__GNUC__)
+# define AO_GCC_BARRIER() AO_compiler_barrier()
+# else
+# define AO_GCC_BARRIER() (void)0
+# endif
+#endif
+
+AO_INLINE AO_t
+AO_load_acquire(const volatile AO_t *addr)
+{
+  /* The volatile read itself supplies acquire semantics (an ld.acq   */
+  /* on IA-64); the explicit barrier only keeps the compiler from     */
+  /* reusing values across it (see the gcc notes above).              */
+  AO_t fetched = *addr;
+
+  AO_GCC_BARRIER();
+  return fetched;
+}
+#define AO_HAVE_load_acquire
+
+AO_INLINE void
+AO_store_release(volatile AO_t *addr, AO_t new_val)
+{
+  /* Stop the compiler from sinking earlier accesses below the store. */
+  AO_GCC_BARRIER();
+  /* A normal volatile store generates an st.rel (on IA-64). */
+  *addr = new_val;
+}
+#define AO_HAVE_store_release
--- /dev/null
+/*
+ * Copyright (c) 2003-2004 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* This file adds definitions appropriate for environments in which */
+/* volatile load of a given type has acquire semantics, and volatile */
+/* store of a given type has release semantics. This is arguably */
+/* supposed to be true with the standard Itanium software conventions. */
+/* Empirically gcc/ia64 does some reordering of ordinary operations */
+/* around volatiles even when we think it should not. GCC v3.3 and */
+/* earlier could reorder a volatile store with another store. As of */
+/* March 2005, gcc pre-4 reuses some previously computed common */
+/* subexpressions across a volatile load; hence, we now add compiler */
+/* barriers for gcc. */
+
+#ifndef AO_GCC_BARRIER
+ /* TODO: Check GCC version (if workaround not needed for modern GCC). */
+# if defined(__GNUC__)
+# define AO_GCC_BARRIER() AO_compiler_barrier()
+# else
+# define AO_GCC_BARRIER() (void)0
+# endif
+#endif
+
+/* NOTE: XCTYPE and XSIZE are placeholders; this file is apparently   */
+/* a template that gets expanded per size/type and is not compiled    */
+/* directly.                                                          */
+AO_INLINE XCTYPE
+AO_XSIZE_load_acquire(const volatile XCTYPE *addr)
+{
+  XCTYPE result = *addr;
+
+  /* A normal volatile load generates an ld.acq (on IA-64). */
+  AO_GCC_BARRIER();
+  return result;
+}
+#define AO_HAVE_XSIZE_load_acquire
+
+AO_INLINE void
+AO_XSIZE_store_release(volatile XCTYPE *addr, XCTYPE new_val)
+{
+  AO_GCC_BARRIER();
+  /* A normal volatile store generates an st.rel (on IA-64). */
+  *addr = new_val;
+}
+#define AO_HAVE_XSIZE_store_release
--- /dev/null
+/*
+ * Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Definitions for architectures on which loads of given type are */
+/* atomic (either for suitably aligned data only or for any legal */
+/* alignment). */
+
+/* Plain (unordered) load; targets including this file guarantee that */
+/* a suitably aligned AO_t read is atomic by itself.                  */
+AO_INLINE AO_t
+AO_load(const volatile AO_t *addr)
+{
+# ifdef AO_ACCESS_CHECK_ALIGNED
+    assert(((size_t)addr & (sizeof(*addr) - 1)) == 0);
+# endif
+  /* Cast away the volatile for architectures like IA64 where */
+  /* volatile adds barrier (fence) semantics.                 */
+  return *(const AO_t *)addr;
+}
+#define AO_HAVE_load
--- /dev/null
+/*
+ * Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Definitions for architectures on which loads of given type are */
+/* atomic (either for suitably aligned data only or for any legal */
+/* alignment). */
+
+/* Template (XCTYPE/XSIZE placeholders) for the plain atomic load of  */
+/* a given size; expanded per type, not compiled directly.            */
+AO_INLINE XCTYPE
+AO_XSIZE_load(const volatile XCTYPE *addr)
+{
+# ifdef AO_ACCESS_XSIZE_CHECK_ALIGNED
+    assert(((size_t)addr & (sizeof(*addr) - 1)) == 0);
+# endif
+  /* Cast away the volatile for architectures like IA64 where */
+  /* volatile adds barrier (fence) semantics.                 */
+  return *(const XCTYPE *)addr;
+}
+#define AO_HAVE_XSIZE_load
--- /dev/null
+/*
+ * Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Definitions for architectures on which stores of given type are */
+/* atomic (either for suitably aligned data only or for any legal */
+/* alignment). */
+
+/* Plain (unordered) store; targets including this file guarantee     */
+/* that a suitably aligned AO_t write is atomic by itself.            */
+AO_INLINE void
+AO_store(volatile AO_t *addr, AO_t new_val)
+{
+  /* Strip the volatile qualifier so that on architectures like IA64, */
+  /* where volatile implies fence semantics, a plain store is emitted. */
+  AO_t *target = (AO_t *)addr;
+
+# ifdef AO_ACCESS_CHECK_ALIGNED
+    assert(((size_t)addr & (sizeof(*addr) - 1)) == 0);
+# endif
+  *target = new_val;
+}
+#define AO_HAVE_store
--- /dev/null
+/*
+ * Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Definitions for architectures on which stores of given type are */
+/* atomic (either for suitably aligned data only or for any legal */
+/* alignment). */
+
+/* Template (XCTYPE/XSIZE placeholders) for the plain atomic store of */
+/* a given size; expanded per type, not compiled directly.            */
+AO_INLINE void
+AO_XSIZE_store(volatile XCTYPE *addr, XCTYPE new_val)
+{
+# ifdef AO_ACCESS_XSIZE_CHECK_ALIGNED
+    assert(((size_t)addr & (sizeof(*addr) - 1)) == 0);
+# endif
+  /* Cast away the volatile (see the matching load header). */
+  *(XCTYPE *)addr = new_val;
+}
+#define AO_HAVE_XSIZE_store
--- /dev/null
+/*
+ * Copyright (c) 2003-2004 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* This file adds definitions appropriate for environments in which */
+/* volatile load of a given type has acquire semantics, and volatile */
+/* store of a given type has release semantics. This is arguably */
+/* supposed to be true with the standard Itanium software conventions. */
+/* Empirically gcc/ia64 does some reordering of ordinary operations */
+/* around volatiles even when we think it should not. GCC v3.3 and */
+/* earlier could reorder a volatile store with another store. As of */
+/* March 2005, gcc pre-4 reuses some previously computed common */
+/* subexpressions across a volatile load; hence, we now add compiler */
+/* barriers for gcc. */
+
+#ifndef AO_GCC_BARRIER
+ /* TODO: Check GCC version (if workaround not needed for modern GCC). */
+# if defined(__GNUC__)
+# define AO_GCC_BARRIER() AO_compiler_barrier()
+# else
+# define AO_GCC_BARRIER() (void)0
+# endif
+#endif
+
+/* Apparently generated from the XSIZE template with type = unsigned  */
+/* char; the empty comment spliced into the type name is an artifact  */
+/* of that substitution.                                              */
+AO_INLINE unsigned/**/char
+AO_char_load_acquire(const volatile unsigned/**/char *addr)
+{
+  unsigned/**/char result = *addr;
+
+  /* A normal volatile load generates an ld.acq (on IA-64). */
+  AO_GCC_BARRIER();
+  return result;
+}
+#define AO_HAVE_char_load_acquire
+
+AO_INLINE void
+AO_char_store_release(volatile unsigned/**/char *addr, unsigned/**/char new_val)
+{
+  AO_GCC_BARRIER();
+  /* A normal volatile store generates an st.rel (on IA-64). */
+  *addr = new_val;
+}
+#define AO_HAVE_char_store_release
--- /dev/null
+/*
+ * Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Definitions for architectures on which loads of given type are */
+/* atomic (either for suitably aligned data only or for any legal */
+/* alignment). */
+
+/* Plain (unordered) byte load; a single byte access is atomic on the */
+/* targets that include this file.                                    */
+AO_INLINE unsigned/**/char
+AO_char_load(const volatile unsigned/**/char *addr)
+{
+# ifdef AO_ACCESS_char_CHECK_ALIGNED
+    assert(((size_t)addr & (sizeof(*addr) - 1)) == 0);
+# endif
+  /* Cast away the volatile for architectures like IA64 where */
+  /* volatile adds barrier (fence) semantics.                 */
+  return *(const unsigned/**/char *)addr;
+}
+#define AO_HAVE_char_load
--- /dev/null
+/*
+ * Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Definitions for architectures on which stores of given type are */
+/* atomic (either for suitably aligned data only or for any legal */
+/* alignment). */
+
+/* Plain (unordered) atomic store of an unsigned char.  The empty      */
+/* comment token in the type name is a template-expansion artifact.    */
+AO_INLINE void
+AO_char_store(volatile unsigned/**/char *addr, unsigned/**/char new_val)
+{
+# ifdef AO_ACCESS_char_CHECK_ALIGNED
+ assert(((size_t)addr & (sizeof(*addr) - 1)) == 0);
+# endif
+ /* Cast away the volatile in case it adds fence semantics (IA64). */
+ *(unsigned/**/char *)addr = new_val;
+}
+#define AO_HAVE_char_store
--- /dev/null
+/*
+ * Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Definitions for architectures on which AO_double_t loads and stores */
+/* are atomic (either for suitably aligned data only or for any legal */
+/* alignment). */
+
+/* Plain (unordered) double-word load; copying through the AO_whole    */
+/* member keeps the access a single double-word read on targets where  */
+/* such reads are atomic (the premise of this file).                   */
+AO_INLINE AO_double_t
+AO_double_load(const volatile AO_double_t *addr)
+{
+ AO_double_t result;
+
+# ifdef AO_ACCESS_double_CHECK_ALIGNED
+ assert(((size_t)addr & (sizeof(AO_double_t) - 1)) == 0);
+# endif
+ /* Cast away the volatile in case it adds fence semantics. */
+ result.AO_whole = ((const AO_double_t *)addr)->AO_whole;
+ return result;
+}
+#define AO_HAVE_double_load
+
+/* Plain (unordered) double-word store through AO_whole, symmetric     */
+/* with AO_double_load above.                                          */
+AO_INLINE void
+AO_double_store(volatile AO_double_t *addr, AO_double_t new_val)
+{
+# ifdef AO_ACCESS_double_CHECK_ALIGNED
+ assert(((size_t)addr & (sizeof(AO_double_t) - 1)) == 0);
+# endif
+ ((AO_double_t *)addr)->AO_whole = new_val.AO_whole;
+}
+#define AO_HAVE_double_store
--- /dev/null
+/*
+ * Copyright (c) 2003-2004 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* This file adds definitions appropriate for environments in which */
+/* volatile load of a given type has acquire semantics, and volatile */
+/* store of a given type has release semantics. This is arguably */
+/* supposed to be true with the standard Itanium software conventions. */
+/* Empirically gcc/ia64 does some reordering of ordinary operations */
+/* around volatiles even when we think it should not. GCC v3.3 and */
+/* earlier could reorder a volatile store with another store. As of */
+/* March 2005, gcc pre-4 reuses some previously computed common */
+/* subexpressions across a volatile load; hence, we now add compiler */
+/* barriers for gcc. */
+
+/* The includer may pre-define AO_GCC_BARRIER to override this default. */
+#ifndef AO_GCC_BARRIER
+ /* TODO: Check GCC version (if workaround not needed for modern GCC). */
+# if defined(__GNUC__)
+# define AO_GCC_BARRIER() AO_compiler_barrier()
+# else
+# define AO_GCC_BARRIER() (void)0
+# endif
+#endif
+
+/* Acquire load: relies on the volatile load itself having acquire     */
+/* semantics (see the file header); the compiler barrier only stops    */
+/* GCC from moving ordinary accesses across it.                        */
+AO_INLINE unsigned
+AO_int_load_acquire(const volatile unsigned *addr)
+{
+ unsigned result = *addr;
+
+ /* A normal volatile load generates an ld.acq (on IA-64). */
+ AO_GCC_BARRIER();
+ return result;
+}
+#define AO_HAVE_int_load_acquire
+
+/* Release store: symmetric with the acquire load above; the barrier   */
+/* precedes the store so earlier accesses cannot sink below it.        */
+AO_INLINE void
+AO_int_store_release(volatile unsigned *addr, unsigned new_val)
+{
+ AO_GCC_BARRIER();
+ /* A normal volatile store generates an st.rel (on IA-64). */
+ *addr = new_val;
+}
+#define AO_HAVE_int_store_release
--- /dev/null
+/*
+ * Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Definitions for architectures on which loads of given type are */
+/* atomic (either for suitably aligned data only or for any legal */
+/* alignment). */
+
+/* Plain (unordered) atomic load of an unsigned int. */
+AO_INLINE unsigned
+AO_int_load(const volatile unsigned *addr)
+{
+# ifdef AO_ACCESS_int_CHECK_ALIGNED
+ assert(((size_t)addr & (sizeof(*addr) - 1)) == 0);
+# endif
+ /* Cast away the volatile for architectures like IA64 where */
+ /* volatile adds barrier (fence) semantics. */
+ return *(const unsigned *)addr;
+}
+#define AO_HAVE_int_load
--- /dev/null
+/*
+ * Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Definitions for architectures on which stores of given type are */
+/* atomic (either for suitably aligned data only or for any legal */
+/* alignment). */
+
+/* Plain (unordered) atomic store of an unsigned int. */
+AO_INLINE void
+AO_int_store(volatile unsigned *addr, unsigned new_val)
+{
+# ifdef AO_ACCESS_int_CHECK_ALIGNED
+ assert(((size_t)addr & (sizeof(*addr) - 1)) == 0);
+# endif
+ /* Cast away the volatile in case it adds fence semantics (IA64). */
+ *(unsigned *)addr = new_val;
+}
+#define AO_HAVE_int_store
--- /dev/null
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Acquire load derived from the read-ordered load: load_read          */
+/* (generalize-small.h) supplies the needed read ordering here.        */
+#ifdef AO_HAVE_char_load
+ /* char_load_read is defined in generalize-small. */
+# define AO_char_load_acquire(addr) AO_char_load_read(addr)
+# define AO_HAVE_char_load_acquire
+#endif
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Acquire load derived from the read-ordered load (see               */
+/* generalize-small.h), short variant.                                */
+#ifdef AO_HAVE_short_load
+ /* short_load_read is defined in generalize-small. */
+# define AO_short_load_acquire(addr) AO_short_load_read(addr)
+# define AO_HAVE_short_load_acquire
+#endif
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Acquire load derived from the read-ordered load (see               */
+/* generalize-small.h), int variant.                                  */
+#ifdef AO_HAVE_int_load
+ /* int_load_read is defined in generalize-small. */
+# define AO_int_load_acquire(addr) AO_int_load_read(addr)
+# define AO_HAVE_int_load_acquire
+#endif
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Acquire load derived from the read-ordered load (see               */
+/* generalize-small.h), AO_t variant.                                 */
+#ifdef AO_HAVE_load
+ /* load_read is defined in generalize-small. */
+# define AO_load_acquire(addr) AO_load_read(addr)
+# define AO_HAVE_load_acquire
+#endif
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Acquire load derived from the read-ordered load (see               */
+/* generalize-small.h), double-word variant.                          */
+#ifdef AO_HAVE_double_load
+ /* double_load_read is defined in generalize-small. */
+# define AO_double_load_acquire(addr) AO_double_load_read(addr)
+# define AO_HAVE_double_load_acquire
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Template: XSIZE is a placeholder replaced with the access width     */
+/* (char, short, int, ...) when this file is instantiated by the       */
+/* generator -- TODO confirm against the generation script.            */
+#ifdef AO_HAVE_XSIZE_load
+ /* XSIZE_load_read is defined in generalize-small. */
+# define AO_XSIZE_load_acquire(addr) AO_XSIZE_load_read(addr)
+# define AO_HAVE_XSIZE_load_acquire
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Release store built as a write barrier followed by a plain store. */
+#ifdef AO_HAVE_char_store
+# define AO_char_store_release(addr, val) \
+ (AO_nop_write(), AO_char_store(addr, val))
+# define AO_HAVE_char_store_release
+#endif
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Release store built as a write barrier followed by a plain store. */
+#ifdef AO_HAVE_short_store
+# define AO_short_store_release(addr, val) \
+ (AO_nop_write(), AO_short_store(addr, val))
+# define AO_HAVE_short_store_release
+#endif
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Release store built as a write barrier followed by a plain store. */
+#ifdef AO_HAVE_int_store
+# define AO_int_store_release(addr, val) \
+ (AO_nop_write(), AO_int_store(addr, val))
+# define AO_HAVE_int_store_release
+#endif
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Release store built as a write barrier followed by a plain store. */
+#ifdef AO_HAVE_store
+# define AO_store_release(addr, val) \
+ (AO_nop_write(), AO_store(addr, val))
+# define AO_HAVE_store_release
+#endif
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Release store built as a write barrier followed by a plain store. */
+#ifdef AO_HAVE_double_store
+# define AO_double_store_release(addr, val) \
+ (AO_nop_write(), AO_double_store(addr, val))
+# define AO_HAVE_double_store_release
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Template: XSIZE is a placeholder replaced with the access width     */
+/* (char, short, int, ...) when this file is instantiated by the       */
+/* generator -- TODO confirm against the generation script.            */
+#ifdef AO_HAVE_XSIZE_store
+# define AO_XSIZE_store_release(addr, val) \
+ (AO_nop_write(), AO_XSIZE_store(addr, val))
+# define AO_HAVE_XSIZE_store_release
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2003-2004 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* This file adds definitions appropriate for environments in which */
+/* volatile load of a given type has acquire semantics, and volatile */
+/* store of a given type has release semantics. This is arguably */
+/* supposed to be true with the standard Itanium software conventions. */
+/* Empirically gcc/ia64 does some reordering of ordinary operations */
+/* around volatiles even when we think it should not. GCC v3.3 and */
+/* earlier could reorder a volatile store with another store. As of */
+/* March 2005, gcc pre-4 reuses some previously computed common */
+/* subexpressions across a volatile load; hence, we now add compiler */
+/* barriers for gcc. */
+
+/* The includer may pre-define AO_GCC_BARRIER to override this default. */
+#ifndef AO_GCC_BARRIER
+ /* TODO: Check GCC version (if workaround not needed for modern GCC). */
+# if defined(__GNUC__)
+# define AO_GCC_BARRIER() AO_compiler_barrier()
+# else
+# define AO_GCC_BARRIER() (void)0
+# endif
+#endif
+
+/* Acquire load: the volatile load itself supplies acquire semantics   */
+/* (see the file header); the compiler barrier only restrains GCC.     */
+/* The empty comment tokens in the type name are template artifacts.   */
+AO_INLINE unsigned/**/short
+AO_short_load_acquire(const volatile unsigned/**/short *addr)
+{
+ unsigned/**/short result = *addr;
+
+ /* A normal volatile load generates an ld.acq (on IA-64). */
+ AO_GCC_BARRIER();
+ return result;
+}
+#define AO_HAVE_short_load_acquire
+
+/* Release store: barrier precedes the volatile store so earlier       */
+/* accesses cannot sink below it.                                      */
+AO_INLINE void
+AO_short_store_release(volatile unsigned/**/short *addr, unsigned/**/short new_val)
+{
+ AO_GCC_BARRIER();
+ /* A normal volatile store generates an st.rel (on IA-64). */
+ *addr = new_val;
+}
+#define AO_HAVE_short_store_release
--- /dev/null
+/*
+ * Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Definitions for architectures on which loads of given type are */
+/* atomic (either for suitably aligned data only or for any legal */
+/* alignment). */
+
+/* Plain (unordered) atomic load of an unsigned short.  The empty      */
+/* comment token in the type name is a template-expansion artifact.    */
+AO_INLINE unsigned/**/short
+AO_short_load(const volatile unsigned/**/short *addr)
+{
+# ifdef AO_ACCESS_short_CHECK_ALIGNED
+ assert(((size_t)addr & (sizeof(*addr) - 1)) == 0);
+# endif
+ /* Cast away the volatile for architectures like IA64 where */
+ /* volatile adds barrier (fence) semantics. */
+ return *(const unsigned/**/short *)addr;
+}
+#define AO_HAVE_short_load
--- /dev/null
+/*
+ * Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Definitions for architectures on which stores of given type are */
+/* atomic (either for suitably aligned data only or for any legal */
+/* alignment). */
+
+/* Plain (unordered) atomic store of an unsigned short.  The empty     */
+/* comment token in the type name is a template-expansion artifact.    */
+AO_INLINE void
+AO_short_store(volatile unsigned/**/short *addr, unsigned/**/short new_val)
+{
+# ifdef AO_ACCESS_short_CHECK_ALIGNED
+ assert(((size_t)addr & (sizeof(*addr) - 1)) == 0);
+# endif
+ /* Cast away the volatile in case it adds fence semantics (IA64). */
+ *(unsigned/**/short *)addr = new_val;
+}
+#define AO_HAVE_short_store
--- /dev/null
+/*
+ * Copyright (c) 2003 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* CAS is available on all WinCE/ARM targets, so the Windows-98 */
+/* assumption required by common32_defs.h always holds here. */
+#ifndef AO_ASSUME_WINDOWS98
+ /* CAS is always available */
+# define AO_ASSUME_WINDOWS98
+#endif
+#include "common32_defs.h"
+/* FIXME: Do _InterlockedOps really have a full memory barrier? */
+/* (MSDN WinCE docs say nothing about it.) */
+
+#include "../test_and_set_t_is_ao_t.h"
+/* AO_test_and_set_full() is emulated using CAS. */
+
+/* Some ARM slide set, if it has been read correctly, claims that Loads */
+/* followed by either a Load or a Store are ordered, but nothing else. */
+/* It is assumed that Windows interrupt handlers clear the LL/SC flag. */
+/* Unaligned accesses are not guaranteed to be atomic. */
+#include "../all_aligned_atomic_load_store.h"
+
+/* If only a single processor is used, we can define AO_UNIPROCESSOR. */
+#ifdef AO_UNIPROCESSOR
+ /* On a uniprocessor a compiler barrier is sufficient as a full */
+ /* barrier: no other agent can observe a reordering of our */
+ /* memory accesses. */
+ AO_INLINE void AO_nop_full(void)
+ {
+ AO_compiler_barrier();
+ }
+# define AO_HAVE_nop_full
+#else
+ /* AO_nop_full() is emulated using AO_test_and_set_full(). */
+#endif
+
+#if _M_ARM >= 6
+/* ARMv6 is the first architecture providing support for simple LL/SC. */
+
+/* #include "../standard_ao_double_t.h" */
+/* TODO: implement double-wide operations (similar to x86). */
+
+#else /* _M_ARM < 6 */
+
+/* TODO: implement AO_test_and_set_full using SWP. */
+
+#endif /* _M_ARM < 6 */
+
+/* AO_t and unsigned int have the same size on 32-bit ARM. */
+#define AO_T_IS_INT
--- /dev/null
+/*
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* This file contains AO primitives based on VC++ built-in intrinsic */
+/* functions commonly available across 32-bit architectures. */
+
+/* This file should be included from arch-specific header files. */
+/* Define AO_USE_INTERLOCKED_INTRINSICS if _Interlocked primitives */
+/* (used below) are available as intrinsic ones for a target arch */
+/* (otherwise "Interlocked" functions family is used instead). */
+/* Define AO_ASSUME_WINDOWS98 if CAS is available. */
+
+#include <windows.h>
+ /* Seems like over-kill, but that's what MSDN recommends. */
+ /* And apparently winbase.h is not always self-contained. */
+
+#if _MSC_VER < 1310 || !defined(AO_USE_INTERLOCKED_INTRINSICS)
+
+/* Map the intrinsic names onto the ordinary Win32 API functions */
+/* declared by <windows.h> (always present, though not intrinsic). */
+# define _InterlockedIncrement InterlockedIncrement
+# define _InterlockedDecrement InterlockedDecrement
+# define _InterlockedExchangeAdd InterlockedExchangeAdd
+# define _InterlockedCompareExchange InterlockedCompareExchange
+
+/* Expanded to nothing here, presumably because the Win32 API */
+/* prototypes lack the volatile qualifier on the pointer argument. */
+# define AO_INTERLOCKED_VOLATILE /**/
+
+#else /* elif _MSC_VER >= 1310 */
+
+# if _MSC_VER >= 1400
+# ifndef _WIN32_WCE
+# include <intrin.h>
+# endif
+
+# else /* elif _MSC_VER < 1400 */
+ /* <intrin.h> is unavailable; declare the intrinsics manually. */
+# ifdef __cplusplus
+ extern "C" {
+# endif
+ LONG __cdecl _InterlockedIncrement(LONG volatile *);
+ LONG __cdecl _InterlockedDecrement(LONG volatile *);
+ LONG __cdecl _InterlockedExchangeAdd(LONG volatile *, LONG);
+ LONG __cdecl _InterlockedCompareExchange(LONG volatile *,
+ LONG /* Exchange */, LONG /* Comp */);
+# ifdef __cplusplus
+ }
+# endif
+# endif /* _MSC_VER < 1400 */
+
+# if !defined(AO_PREFER_GENERALIZED) || !defined(AO_ASSUME_WINDOWS98)
+# pragma intrinsic (_InterlockedIncrement)
+# pragma intrinsic (_InterlockedDecrement)
+# pragma intrinsic (_InterlockedExchangeAdd)
+# endif /* !AO_PREFER_GENERALIZED */
+# pragma intrinsic (_InterlockedCompareExchange)
+
+# define AO_INTERLOCKED_VOLATILE volatile
+
+#endif /* _MSC_VER >= 1310 */
+
+#if !defined(AO_PREFER_GENERALIZED) || !defined(AO_ASSUME_WINDOWS98)
+/* Atomically add incr to *p and return the old value of *p. */
+AO_INLINE AO_t
+AO_fetch_and_add_full(volatile AO_t *p, AO_t incr)
+{
+ return _InterlockedExchangeAdd((LONG AO_INTERLOCKED_VOLATILE *)p,
+ (LONG)incr);
+}
+#define AO_HAVE_fetch_and_add_full
+
+/* _InterlockedIncrement returns the new (incremented) value, so */
+/* subtract one to obtain the old value expected of fetch_and_add1. */
+AO_INLINE AO_t
+AO_fetch_and_add1_full(volatile AO_t *p)
+{
+ return _InterlockedIncrement((LONG AO_INTERLOCKED_VOLATILE *)p) - 1;
+}
+#define AO_HAVE_fetch_and_add1_full
+
+/* Similarly, add one back to the new value returned by */
+/* _InterlockedDecrement to obtain the old value. */
+AO_INLINE AO_t
+AO_fetch_and_sub1_full(volatile AO_t *p)
+{
+ return _InterlockedDecrement((LONG AO_INTERLOCKED_VOLATILE *)p) + 1;
+}
+#define AO_HAVE_fetch_and_sub1_full
+#endif /* !AO_PREFER_GENERALIZED */
+
+#ifdef AO_ASSUME_WINDOWS98
+ /* Compare-and-swap: if *addr == old_val, store new_val; in either */
+ /* case return the value originally read from *addr. */
+ AO_INLINE AO_t
+ AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old_val,
+ AO_t new_val)
+ {
+# ifdef AO_OLD_STYLE_INTERLOCKED_COMPARE_EXCHANGE
+ /* Apparently some old SDKs declare the arguments as PVOID */
+ /* rather than LONG, hence the alternate casts. */
+ return (AO_t)_InterlockedCompareExchange(
+ (PVOID AO_INTERLOCKED_VOLATILE *)addr,
+ (PVOID)new_val, (PVOID)old_val);
+# else
+ return (AO_t)_InterlockedCompareExchange(
+ (LONG AO_INTERLOCKED_VOLATILE *)addr,
+ (LONG)new_val, (LONG)old_val);
+# endif
+ }
+# define AO_HAVE_fetch_compare_and_swap_full
+#endif /* AO_ASSUME_WINDOWS98 */
--- /dev/null
+/*
+ * Copyright (c) 2003 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* If AO_ASSUME_WINDOWS98 is defined, we assume Windows 98 or newer. */
+/* If AO_ASSUME_VISTA is defined, we assume Windows Server 2003, Vista */
+/* or later. */
+
+#include "../all_aligned_atomic_load_store.h"
+
+#include "../test_and_set_t_is_char.h"
+
+#if defined(AO_ASSUME_VISTA) && !defined(AO_ASSUME_WINDOWS98)
+ /* Vista implies the Windows-98 feature set (CAS availability). */
+# define AO_ASSUME_WINDOWS98
+#endif
+
+#ifndef AO_USE_INTERLOCKED_INTRINSICS
+ /* _Interlocked primitives (Inc, Dec, Xchg, Add) are always available */
+# define AO_USE_INTERLOCKED_INTRINSICS
+#endif
+#include "common32_defs.h"
+
+/* As far as we can tell, the lfence and sfence instructions are not */
+/* currently needed or useful for cached memory accesses. */
+
+/* Unfortunately mfence doesn't exist everywhere. */
+/* IsProcessorFeaturePresent(PF_COMPARE_EXCHANGE128) is */
+/* probably a conservative test for it? */
+
+#if defined(AO_USE_PENTIUM4_INSTRS)
+
+/* Full memory barrier via the SSE2 mfence instruction. */
+AO_INLINE void
+AO_nop_full(void)
+{
+ __asm { mfence }
+}
+#define AO_HAVE_nop_full
+
+#else
+
+/* We could use the cpuid instruction. But that seems to be slower */
+/* than the default implementation based on test_and_set_full. Thus */
+/* we omit that bit of misinformation here. */
+
+#endif
+
+#ifndef AO_NO_ASM_XADD
+ /* Atomically add incr to *p and return the old value. The xadd */
+ /* leaves the old value in al, which is the MSVC return register, */
+ /* hence the deliberate absence of a return statement. */
+ AO_INLINE unsigned char
+ AO_char_fetch_and_add_full(volatile unsigned char *p, unsigned char incr)
+ {
+ __asm
+ {
+ mov al, incr
+ mov ebx, p
+ lock xadd byte ptr [ebx], al
+ }
+ /* Ignore possible "missing return value" warning here. */
+ }
+# define AO_HAVE_char_fetch_and_add_full
+
+ /* Same as above for 16-bit operands (result left in ax). */
+ AO_INLINE unsigned short
+ AO_short_fetch_and_add_full(volatile unsigned short *p, unsigned short incr)
+ {
+ __asm
+ {
+ mov ax, incr
+ mov ebx, p
+ lock xadd word ptr [ebx], ax
+ }
+ /* Ignore possible "missing return value" warning here. */
+ }
+# define AO_HAVE_short_fetch_and_add_full
+#endif /* !AO_NO_ASM_XADD */
+
+/* Atomically exchange *addr with AO_TS_SET (0xff) and return the */
+/* old value (in al); xchg with a memory operand is implicitly */
+/* locked, so no "lock" prefix is needed. */
+AO_INLINE AO_TS_VAL_t
+AO_test_and_set_full(volatile AO_TS_t *addr)
+{
+ __asm
+ {
+ mov eax,0xff ; /* AO_TS_SET */
+ mov ebx,addr ;
+ xchg byte ptr [ebx],al ;
+ }
+ /* Ignore possible "missing return value" warning here. */
+}
+#define AO_HAVE_test_and_set_full
+
+#ifdef _WIN64
+# error wrong architecture
+#endif
+
+#ifdef AO_ASSUME_VISTA
+# include "../standard_ao_double_t.h"
+
+ /* Reading or writing a quadword aligned on a 64-bit boundary is */
+ /* always carried out atomically (requires at least a Pentium). */
+# define AO_ACCESS_double_CHECK_ALIGNED
+# include "../loadstore/double_atomic_load_store.h"
+
+ /* Whenever we run on a Pentium class machine, we have that certain */
+ /* function. */
+# pragma intrinsic (_InterlockedCompareExchange64)
+
+ /* Returns nonzero if the comparison succeeded. */
+ /* _InterlockedCompareExchange64 returns the initial value of the */
+ /* destination; comparing it with old_val yields the success flag. */
+ AO_INLINE int
+ AO_double_compare_and_swap_full(volatile AO_double_t *addr,
+ AO_double_t old_val, AO_double_t new_val)
+ {
+ return (double_ptr_storage)_InterlockedCompareExchange64(
+ (__int64 volatile *)addr,
+ new_val.AO_whole /* exchange */,
+ old_val.AO_whole) == old_val.AO_whole;
+ }
+# define AO_HAVE_double_compare_and_swap_full
+#endif /* AO_ASSUME_VISTA */
+
+/* AO_t and unsigned int have the same size on x86-32. */
+#define AO_T_IS_INT
+
+/* Real X86 implementations, except for some old WinChips, appear */
+/* to enforce ordering between memory operations, EXCEPT that a later */
+/* read can pass earlier writes, presumably due to the visible */
+/* presence of store buffers. */
+/* We ignore both the WinChips, and the fact that the official specs */
+/* seem to be much weaker (and arguably too weak to be usable). */
+#include "../ordered_except_wr.h"
--- /dev/null
+/*
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "../all_aligned_atomic_load_store.h"
+
+/* Real X86 implementations appear */
+/* to enforce ordering between memory operations, EXCEPT that a later */
+/* read can pass earlier writes, presumably due to the visible */
+/* presence of store buffers. */
+/* We ignore the fact that the official specs */
+/* seem to be much weaker (and arguably too weak to be usable). */
+
+#include "../ordered_except_wr.h"
+
+#ifdef AO_ASM_X64_AVAILABLE
+# include "../test_and_set_t_is_char.h"
+#else
+# include "../test_and_set_t_is_ao_t.h"
+#endif
+
+#include <windows.h>
+ /* Seems like over-kill, but that's what MSDN recommends. */
+ /* And apparently winbase.h is not always self-contained. */
+
+/* Assume _MSC_VER >= 1400 */
+#include <intrin.h>
+
+#pragma intrinsic (_InterlockedExchangeAdd)
+#pragma intrinsic (_InterlockedCompareExchange64)
+
+#ifndef AO_PREFER_GENERALIZED
+
+# pragma intrinsic (_InterlockedIncrement64)
+# pragma intrinsic (_InterlockedDecrement64)
+# pragma intrinsic (_InterlockedExchangeAdd64)
+
+/* Atomically add incr to *p and return the old value of *p. */
+AO_INLINE AO_t
+AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
+{
+ return _InterlockedExchangeAdd64((LONGLONG volatile *)p, (LONGLONG)incr);
+}
+#define AO_HAVE_fetch_and_add_full
+
+/* _InterlockedIncrement64 returns the new value; subtract one to */
+/* obtain the old value expected of fetch_and_add1. */
+AO_INLINE AO_t
+AO_fetch_and_add1_full (volatile AO_t *p)
+{
+ return _InterlockedIncrement64((LONGLONG volatile *)p) - 1;
+}
+#define AO_HAVE_fetch_and_add1_full
+
+/* Similarly, add one back to the new value returned by */
+/* _InterlockedDecrement64. */
+AO_INLINE AO_t
+AO_fetch_and_sub1_full (volatile AO_t *p)
+{
+ return _InterlockedDecrement64((LONGLONG volatile *)p) + 1;
+}
+#define AO_HAVE_fetch_and_sub1_full
+#endif /* !AO_PREFER_GENERALIZED */
+
+/* Compare-and-swap returning the value originally read from *addr. */
+AO_INLINE AO_t
+AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old_val,
+ AO_t new_val)
+{
+ return (AO_t)_InterlockedCompareExchange64((LONGLONG volatile *)addr,
+ (LONGLONG)new_val, (LONGLONG)old_val);
+}
+#define AO_HAVE_fetch_compare_and_swap_full
+
+/* 32-bit variant of fetch-and-add (AO_t is 64 bits wide here). */
+AO_INLINE unsigned int
+AO_int_fetch_and_add_full(volatile unsigned int *p, unsigned int incr)
+{
+ return _InterlockedExchangeAdd((LONG volatile *)p, incr);
+}
+#define AO_HAVE_int_fetch_and_add_full
+
+#ifdef AO_ASM_X64_AVAILABLE
+
+ /* Atomically add incr to *p and return the old value; the result */
+ /* is deliberately left in al (the return register), hence the */
+ /* absence of an explicit return statement. */
+ AO_INLINE unsigned char
+ AO_char_fetch_and_add_full(volatile unsigned char *p, unsigned char incr)
+ {
+ __asm
+ {
+ mov al, incr
+ mov rbx, p
+ lock xadd byte ptr [rbx], al
+ }
+ }
+# define AO_HAVE_char_fetch_and_add_full
+
+ /* Same as above for 16-bit operands (result left in ax). */
+ AO_INLINE unsigned short
+ AO_short_fetch_and_add_full(volatile unsigned short *p, unsigned short incr)
+ {
+ __asm
+ {
+ mov ax, incr
+ mov rbx, p
+ lock xadd word ptr [rbx], ax
+ }
+ }
+# define AO_HAVE_short_fetch_and_add_full
+
+/* As far as we can tell, the lfence and sfence instructions are not */
+/* currently needed or useful for cached memory accesses. */
+
+ AO_INLINE void
+ AO_nop_full(void)
+ {
+ /* Note: "mfence" (SSE2) is supported on all x86_64/amd64 chips. */
+ __asm { mfence }
+ }
+# define AO_HAVE_nop_full
+
+ /* Atomically exchange the byte at *addr with AO_TS_SET and return */
+ /* the old value in al; xchg with a memory operand is implicitly */
+ /* locked. */
+ AO_INLINE AO_TS_VAL_t
+ AO_test_and_set_full(volatile AO_TS_t *addr)
+ {
+ __asm
+ {
+ mov rax,AO_TS_SET ;
+ mov rbx,addr ;
+ xchg byte ptr [rbx],al ;
+ }
+ }
+# define AO_HAVE_test_and_set_full
+
+#endif /* AO_ASM_X64_AVAILABLE */
+
+#ifdef AO_CMPXCHG16B_AVAILABLE
+/* AO_compare_double_and_swap_double_full needs implementation for */
+/* Win64. Also see ../gcc/x86.h for partial old Opteron workaround. */
+
+# if _MSC_VER >= 1500
+
+# include "../standard_ao_double_t.h"
+
+# pragma intrinsic (_InterlockedCompareExchange128)
+
+/* Returns nonzero if the comparison succeeded (the intrinsic itself */
+/* returns 1 on success and 0 otherwise, per MSDN). */
+AO_INLINE int
+AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
+ AO_t old_val1, AO_t old_val2,
+ AO_t new_val1, AO_t new_val2)
+{
+ __int64 comparandResult[2];
+ comparandResult[0] = old_val1; /* low */
+ comparandResult[1] = old_val2; /* high */
+ return _InterlockedCompareExchange128((volatile __int64 *)addr,
+ new_val2 /* high */, new_val1 /* low */, comparandResult);
+}
+# define AO_HAVE_compare_double_and_swap_double_full
+
+# elif defined(AO_ASM_X64_AVAILABLE)
+
+# include "../standard_ao_double_t.h"
+
+ /* If there is no intrinsic _InterlockedCompareExchange128 then we */
+ /* need basically what's given below. */
+ /* cmpxchg16b compares rdx:rax against the memory operand and, on */
+ /* a match, stores rcx:rbx; setz captures the success flag. */
+ /* NOTE(review): "setz" takes an 8-bit operand - verify that the */
+ /* assembler accepts "setz rax" (vs. "setz al") here. */
+AO_INLINE int
+AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
+ AO_t old_val1, AO_t old_val2,
+ AO_t new_val1, AO_t new_val2)
+{
+ __asm
+ {
+ mov rdx,QWORD PTR [old_val2] ;
+ mov rax,QWORD PTR [old_val1] ;
+ mov rcx,QWORD PTR [new_val2] ;
+ mov rbx,QWORD PTR [new_val1] ;
+ lock cmpxchg16b [addr] ;
+ setz rax ;
+ }
+}
+# define AO_HAVE_compare_double_and_swap_double_full
+# endif /* AO_ASM_X64_AVAILABLE && (_MSC_VER < 1500) */
+
+#endif /* AO_CMPXCHG16B_AVAILABLE */
--- /dev/null
+/*
+ * Copyright (c) 2003 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* These are common definitions for architectures that provide */
+/* processor ordered memory operations. */
+
+#include "ordered_except_wr.h"
+
+/* On a fully ordered architecture a compiler barrier is all that is */
+/* needed to implement a full memory barrier. */
+AO_INLINE void
+AO_nop_full(void)
+{
+ AO_compiler_barrier();
+}
+#define AO_HAVE_nop_full
--- /dev/null
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * These are common definitions for architectures that provide processor
+ * ordered memory operations except that a later read may pass an
+ * earlier write. Real x86 implementations seem to be in this category,
+ * except apparently for some IDT WinChips, which we ignore.
+ */
+
+#include "read_ordered.h"
+
+/* Write barrier: stores are ordered on these targets, so a mere */
+/* compiler barrier suffices. */
+AO_INLINE void
+AO_nop_write(void)
+{
+ /* AO_nop_write implementation is the same as of AO_nop_read. */
+ AO_compiler_barrier();
+ /* sfence according to Intel docs. Pentium 3 and up. */
+ /* Unnecessary for cached accesses? */
+}
+#define AO_HAVE_nop_write
+
+#include "loadstore/ordered_stores_only.h"
--- /dev/null
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * These are common definitions for architectures that provide processor
+ * ordered memory operations except that a later read may pass an
+ * earlier write. Real x86 implementations seem to be in this category,
+ * except apparently for some IDT WinChips, which we ignore.
+ */
+
+/* Read barrier: loads are ordered on these targets, so a compiler */
+/* barrier suffices (no fence instruction is required). */
+AO_INLINE void
+AO_nop_read(void)
+{
+ AO_compiler_barrier();
+}
+#define AO_HAVE_nop_read
+
+#include "loadstore/ordered_loads_only.h"
--- /dev/null
+/*
+ * Copyright (c) 2004-2011 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* For 64-bit systems, we extend the double type to hold two int64's. */
+/* x86-64 (except for x32): __m128 serves as a placeholder which also */
+/* requires the compiler to align it on 16-byte boundary (as required */
+/* by cmpxchg16b). */
+/* Similar things could be done for PPC 64-bit using a VMX data type. */
+
+#if ((defined(__x86_64__) && __GNUC__ >= 4) || defined(_WIN64)) \
+ && !defined(__ILP32__)
+# include <xmmintrin.h>
+ typedef __m128 double_ptr_storage;
+#elif defined(_WIN32) && !defined(__GNUC__)
+ typedef unsigned __int64 double_ptr_storage;
+#elif defined(__aarch64__)
+ typedef unsigned __int128 double_ptr_storage;
+#else
+ typedef unsigned long long double_ptr_storage;
+#endif
+# define AO_HAVE_DOUBLE_PTR_STORAGE
+
+/* A datum as wide as two AO_t values, accessible either as a pair of */
+/* words (AO_parts) or as a single opaque value (AO_whole). */
+typedef union {
+ struct { AO_t AO_v1; AO_t AO_v2; } AO_parts;
+ /* Note that AO_v1 corresponds to the low or the high part of */
+ /* AO_whole depending on the machine endianness. */
+ double_ptr_storage AO_whole;
+ /* AO_whole is now (starting from v7.3alpha3) the 2nd element */
+ /* of this union to make AO_DOUBLE_T_INITIALIZER portable */
+ /* (because __m128 definition could vary from a primitive type */
+ /* to a structure or array/vector). */
+} AO_double_t;
+#define AO_HAVE_double_t
+
+/* Dummy declaration as a compile-time assertion for AO_double_t size. */
+/* (The array size becomes negative, hence ill-formed, on mismatch.) */
+struct AO_double_t_size_static_assert {
+ char dummy[sizeof(AO_double_t) == 2 * sizeof(AO_t) ? 1 : -1];
+};
+
+#define AO_DOUBLE_T_INITIALIZER { { (AO_t)0, (AO_t)0 } }
+
+/* Convenience accessors for the two halves of an AO_double_t. */
+#define AO_val1 AO_parts.AO_v1
+#define AO_val2 AO_parts.AO_v2
--- /dev/null
+ .seg "text"
+ .globl AO_test_and_set_full
+! AO_TS_VAL_t AO_test_and_set_full(volatile AO_TS_t *addr /* %o0 */)
+! "ldstub" executes in the delay slot of "retl": it atomically loads
+! the byte at [%o0] and stores 0xff into it, leaving the old value in
+! %o0, the return-value register.
+AO_test_and_set_full:
+ retl
+ ldstub [%o0],%o0
--- /dev/null
+/*
+ * Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "../all_atomic_load_store.h"
+
+/* Real SPARC code uses TSO: */
+#include "../ordered_except_wr.h"
+
+/* Test_and_set location is just a byte. */
+#include "../test_and_set_t_is_char.h"
+
+/* Implemented in sparc.S via the atomic ldstub instruction. */
+extern AO_TS_VAL_t
+AO_test_and_set_full(volatile AO_TS_t *addr);
+/* Implemented in separate .S file, for now. */
+#define AO_HAVE_test_and_set_full
+
+/* TODO: Like the gcc version, extend this for V8 and V9. */
--- /dev/null
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ * Some of the machine specific code was borrowed from our GC distribution.
+ */
+
+/* The following really assume we have a 486 or better. */
+
+#include "../all_aligned_atomic_load_store.h"
+
+#include "../test_and_set_t_is_char.h"
+
+#if !defined(AO_USE_PENTIUM4_INSTRS) && !defined(__i386)
+ /* "mfence" (SSE2) is supported on all x86_64/amd64 chips. */
+# define AO_USE_PENTIUM4_INSTRS
+#endif
+
+#if defined(AO_USE_PENTIUM4_INSTRS)
+ /* Full memory barrier via the SSE2 mfence instruction. */
+ AO_INLINE void
+ AO_nop_full(void)
+ {
+ __asm__ __volatile__ ("mfence" : : : "memory");
+ }
+# define AO_HAVE_nop_full
+
+#else
+ /* We could use the cpuid instruction. But that seems to be slower */
+ /* than the default implementation based on test_and_set_full. Thus */
+ /* we omit that bit of misinformation here. */
+#endif /* !AO_USE_PENTIUM4_INSTRS */
+
+/* As far as we can tell, the lfence and sfence instructions are not */
+/* currently needed or useful for cached memory accesses. */
+
+/* Really only works for 486 and later */
+#ifndef AO_PREFER_GENERALIZED
+ /* Atomically add incr to *p and return the old value. The "0" */
+ /* input constraint places incr in the result register, which */
+ /* xadd then replaces with the previous contents of *p. */
+ AO_INLINE AO_t
+ AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
+ {
+ AO_t result;
+
+ __asm__ __volatile__ ("lock; xadd %0, %1"
+ : "=r" (result), "+m" (*p)
+ : "0" (incr)
+ : "memory");
+ return result;
+ }
+# define AO_HAVE_fetch_and_add_full
+#endif /* !AO_PREFER_GENERALIZED */
+
+/* Byte-wide variant; "q" constrains the result to a register with a */
+/* byte-addressable subregister. */
+AO_INLINE unsigned char
+AO_char_fetch_and_add_full (volatile unsigned char *p, unsigned char incr)
+{
+ unsigned char result;
+
+ __asm__ __volatile__ ("lock; xaddb %0, %1"
+ : "=q" (result), "+m" (*p)
+ : "0" (incr)
+ : "memory");
+ return result;
+}
+#define AO_HAVE_char_fetch_and_add_full
+
+/* 16-bit variant of fetch-and-add. */
+AO_INLINE unsigned short
+AO_short_fetch_and_add_full (volatile unsigned short *p, unsigned short incr)
+{
+ unsigned short result;
+
+ __asm__ __volatile__ ("lock; xaddw %0, %1"
+ : "=r" (result), "+m" (*p)
+ : "0" (incr)
+ : "memory");
+ return result;
+}
+#define AO_HAVE_short_fetch_and_add_full
+
+#ifndef AO_PREFER_GENERALIZED
+ /* Really only works for 486 and later */
+ /* Atomically perform *p &= value (no value is returned). */
+ AO_INLINE void
+ AO_and_full (volatile AO_t *p, AO_t value)
+ {
+ __asm__ __volatile__ ("lock; and %1, %0"
+ : "+m" (*p)
+ : "r" (value)
+ : "memory");
+ }
+# define AO_HAVE_and_full
+
+ /* Atomically perform *p |= value. */
+ AO_INLINE void
+ AO_or_full (volatile AO_t *p, AO_t value)
+ {
+ __asm__ __volatile__ ("lock; or %1, %0"
+ : "+m" (*p)
+ : "r" (value)
+ : "memory");
+ }
+# define AO_HAVE_or_full
+
+ /* Atomically perform *p ^= value. */
+ AO_INLINE void
+ AO_xor_full (volatile AO_t *p, AO_t value)
+ {
+ __asm__ __volatile__ ("lock; xor %1, %0"
+ : "+m" (*p)
+ : "r" (value)
+ : "memory");
+ }
+# define AO_HAVE_xor_full
+#endif /* !AO_PREFER_GENERALIZED */
+
+/* Atomically exchange the byte at *addr with 0xff (AO_TS_SET) and */
+/* return its previous contents. */
+AO_INLINE AO_TS_VAL_t
+AO_test_and_set_full (volatile AO_TS_t *addr)
+{
+ AO_TS_t oldval;
+ /* Note: the "xchg" instruction does not need a "lock" prefix */
+ __asm__ __volatile__ ("xchg %b0, %1"
+ : "=q" (oldval), "+m" (*addr)
+ : "0" (0xff)
+ : "memory");
+ return (AO_TS_VAL_t)oldval;
+}
+#define AO_HAVE_test_and_set_full
+
+#ifndef AO_GENERALIZE_ASM_BOOL_CAS
+ /* Returns nonzero if the comparison succeeded. */
+ /* cmpxchg compares the accumulator (the "a" input, holding old) */
+ /* with *addr; setz captures the resulting zero flag. */
+ AO_INLINE int
+ AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val)
+ {
+ char result;
+ __asm__ __volatile__ ("lock; cmpxchg %2, %0; setz %1"
+ : "+m" (*addr), "=a" (result)
+ : "r" (new_val), "a" (old)
+ : "memory");
+ return (int) result;
+ }
+# define AO_HAVE_compare_and_swap_full
+#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
+
+/* CAS returning the value actually read from *addr (cmpxchg leaves */
+/* it in the accumulator, captured via the "=a" output). */
+AO_INLINE AO_t
+AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old_val,
+ AO_t new_val)
+{
+ AO_t fetched_val;
+ __asm__ __volatile__ ("lock; cmpxchg %2, %0"
+ : "+m" (*addr), "=a" (fetched_val)
+ : "r" (new_val), "a" (old_val)
+ : "memory");
+ return fetched_val;
+}
+#define AO_HAVE_fetch_compare_and_swap_full
+
+#if defined(__i386)
+
+# ifndef AO_NO_CMPXCHG8B
+# include "../standard_ao_double_t.h"
+
+ /* Reading or writing a quadword aligned on a 64-bit boundary is */
+ /* always carried out atomically (requires at least a Pentium). */
+# define AO_ACCESS_double_CHECK_ALIGNED
+# include "../loadstore/double_atomic_load_store.h"
+
+ /* Returns nonzero if the comparison succeeded. */
+ /* Really requires at least a Pentium. */
+ /* cmpxchg8b compares edx:eax with *addr and, on a match, stores */
+ /* ecx:ebx - hence the "d"/"a"/"c"/"b" register constraints. */
+ AO_INLINE int
+ AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
+ AO_t old_val1, AO_t old_val2,
+ AO_t new_val1, AO_t new_val2)
+ {
+ char result;
+
+ __asm__ __volatile__ ("lock; cmpxchg8b %0; setz %1"
+ : "+m" (*addr), "=a" (result)
+ : "d" (old_val2), "a" (old_val1),
+ "c" (new_val2), "b" (new_val1)
+ : "memory");
+ return (int) result;
+ }
+# define AO_HAVE_compare_double_and_swap_double_full
+# endif /* !AO_NO_CMPXCHG8B */
+
+ /* AO_t and unsigned int have the same size on x86-32. */
+# define AO_T_IS_INT
+
+#else /* x64 */
+
+ /* 32-bit fetch-and-add (AO_t is 64 bits wide in this branch). */
+ AO_INLINE unsigned int
+ AO_int_fetch_and_add_full (volatile unsigned int *p, unsigned int incr)
+ {
+ unsigned int result;
+
+ __asm__ __volatile__ ("lock; xaddl %0, %1"
+ : "=r" (result), "+m" (*p)
+ : "0" (incr)
+ : "memory");
+ return result;
+ }
+# define AO_HAVE_int_fetch_and_add_full
+
+# ifdef AO_CMPXCHG16B_AVAILABLE
+# include "../standard_ao_double_t.h"
+
+ /* Older AMD Opterons are missing this instruction (SIGILL should */
+ /* be thrown in this case). */
+ /* cmpxchg16b compares rdx:rax with *addr and, on a match, stores */
+ /* rcx:rbx; setz captures the success flag. */
+ AO_INLINE int
+ AO_compare_double_and_swap_double_full (volatile AO_double_t *addr,
+ AO_t old_val1, AO_t old_val2,
+ AO_t new_val1, AO_t new_val2)
+ {
+ char result;
+ __asm__ __volatile__ ("lock; cmpxchg16b %0; setz %1"
+ : "+m" (*addr), "=a" (result)
+ : "d" (old_val2), "a" (old_val1),
+ "c" (new_val2), "b" (new_val1)
+ : "memory");
+ return (int) result;
+ }
+# define AO_HAVE_compare_double_and_swap_double_full
+# endif /* !AO_CMPXCHG16B_AVAILABLE */
+
+#endif /* x64 */
+
+/* Real X86 implementations, except for some old 32-bit WinChips, */
+/* appear to enforce ordering between memory operations, EXCEPT that */
+/* a later read can pass earlier writes, presumably due to the visible */
+/* presence of store buffers. */
+/* We ignore both the WinChips and the fact that the official specs */
+/* seem to be much weaker (and arguably too weak to be usable). */
+#include "../ordered_except_wr.h"
--- /dev/null
+/*
+ * Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * These are common definitions for architectures on which test_and_set
+ * operates on pointer-sized quantities, the "clear" value contains
+ * all zeroes, and the "set" value contains only one lowest bit set.
+ * This can be used if test_and_set is synthesized from compare_and_swap.
+ */
+typedef enum {AO_TS_clear = 0, AO_TS_set = 1} AO_TS_val;
+#define AO_TS_VAL_t AO_TS_val
+#define AO_TS_CLEAR AO_TS_clear
+#define AO_TS_SET AO_TS_set
+
+#define AO_TS_t AO_t
+
+#define AO_AO_TS_T 1
--- /dev/null
+/*
+ * Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * These are common definitions for architectures on which test_and_set
+ * operates on byte sized quantities, the "clear" value contains
+ * all zeroes, and the "set" value contains all ones.
+ */
+
+#define AO_TS_t unsigned char
+typedef enum {AO_BYTE_TS_clear = 0, AO_BYTE_TS_set = 0xff} AO_BYTE_TS_val;
+#define AO_TS_VAL_t AO_BYTE_TS_val
+#define AO_TS_CLEAR AO_BYTE_TS_clear
+#define AO_TS_SET AO_BYTE_TS_set
+
+#define AO_CHAR_TS_T 1
--- /dev/null
+/*
+ * Copyright (c) 2005 Hewlett-Packard Development Company, L.P.
+ *
+ * This file may be redistributed and/or modified under the
+ * terms of the GNU General Public License as published by the Free Software
+ * Foundation; either version 2, or (at your option) any later version.
+ *
+ * It is distributed in the hope that it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License in the
+ * file COPYING for more details.
+ */
+
+#if defined(HAVE_CONFIG_H)
+# include "config.h"
+#endif
+
+#define AO_REQUIRE_CAS
+#include "atomic_ops_malloc.h"
+
+#include <string.h> /* for ffs, which is assumed reentrant. */
+#include <stdlib.h>
+#include <assert.h>
+
+#ifdef AO_TRACE_MALLOC
+# include <stdio.h>
+# include <pthread.h>
+#endif
+
+#if (defined(_WIN32_WCE) || defined(__MINGW32CE__)) && !defined(abort)
+# define abort() _exit(-1) /* there is no abort() in WinCE */
+#endif
+
+/*
+ * We round up each allocation request to the next power of two
+ * minus one word.
+ * We keep one stack of free objects for each size. Each object
+ * has an initial word (offset -sizeof(AO_t) from the visible pointer)
+ * which contains either
+ * The binary log of the object size in bytes (small objects)
+ * The object size (a multiple of CHUNK_SIZE) for large objects.
+ * The second case only arises if mmap-based allocation is supported.
+ * We align the user-visible part of each object on a GRANULARITY
+ * byte boundary. That means that the actual (hidden) start of
+ * the object starts a word before this boundary.
+ */
+
+#ifndef LOG_MAX_SIZE
+# define LOG_MAX_SIZE 16
+ /* We assume that 2**LOG_MAX_SIZE is a multiple of page size. */
+#endif
+
+#ifndef ALIGNMENT
+# define ALIGNMENT 16
+ /* Assumed to be at least sizeof(AO_t). */
+#endif
+
+#define CHUNK_SIZE (1 << LOG_MAX_SIZE)
+
+#ifndef AO_INITIAL_HEAP_SIZE
+# define AO_INITIAL_HEAP_SIZE (2*(LOG_MAX_SIZE+1)*CHUNK_SIZE)
+#endif
+
+char AO_initial_heap[AO_INITIAL_HEAP_SIZE];
+
+static volatile AO_t initial_heap_ptr = (AO_t)AO_initial_heap;
+
+#if defined(HAVE_MMAP)
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+
+#if defined(MAP_ANONYMOUS) || defined(MAP_ANON)
+# define USE_MMAP_ANON
+#endif
+
+#ifdef USE_MMAP_FIXED
+# define GC_MMAP_FLAGS (MAP_FIXED | MAP_PRIVATE)
+ /* Seems to yield better performance on Solaris 2, but can */
+ /* be unreliable if something is already mapped at the address. */
+#else
+# define GC_MMAP_FLAGS MAP_PRIVATE
+#endif
+
+#ifdef USE_MMAP_ANON
+# ifdef MAP_ANONYMOUS
+# define OPT_MAP_ANON MAP_ANONYMOUS
+# else
+# define OPT_MAP_ANON MAP_ANON
+# endif
+#else
+# define OPT_MAP_ANON 0
+#endif
+
+static volatile AO_t mmap_enabled = 0;
+
+/* Allow AO_malloc to grow the heap via mmap once the static initial */
+/* heap is exhausted (simply sets the mmap_enabled flag). */
+void
+AO_malloc_enable_mmap(void)
+{
+# if defined(__sun)
+ AO_store_release(&mmap_enabled, 1);
+ /* Workaround for Sun CC */
+# else
+ AO_store(&mmap_enabled, 1);
+# endif
+}
+
+/* Map sz bytes (sz must be a multiple of CHUNK_SIZE) of zero-filled */
+/* memory. Returns 0 if mmap has not been enabled or mapping fails. */
+static char *get_mmaped(size_t sz)
+{
+ char * result;
+# ifdef USE_MMAP_ANON
+# define zero_fd -1
+# else
+ int zero_fd;
+# endif
+
+ assert(!(sz & (CHUNK_SIZE - 1)));
+ if (!mmap_enabled)
+ return 0;
+
+# ifndef USE_MMAP_ANON
+ /* No anonymous-mapping support: map /dev/zero instead. */
+ zero_fd = open("/dev/zero", O_RDONLY);
+ if (zero_fd == -1)
+ return 0;
+# endif
+ result = mmap(0, sz, PROT_READ | PROT_WRITE,
+ GC_MMAP_FLAGS | OPT_MAP_ANON, zero_fd, 0/* offset */);
+# ifndef USE_MMAP_ANON
+ close(zero_fd);
+# endif
+ if (result == MAP_FAILED)
+ result = 0;
+ return result;
+}
+
+/* Allocate an object of size (incl. header) of size > CHUNK_SIZE. */
+/* sz includes space for an AO_t-sized header. */
+static char *
+AO_malloc_large(size_t sz)
+{
+ char * result;
+
+ /* The header will force us to waste ALIGNMENT bytes, incl. header. */
+ /* Refuse requests whose rounded-up size would wrap size_t; */
+ /* otherwise the mapping below could be far smaller than sz. */
+ if (sz > ~(size_t)0 - (ALIGNMENT + CHUNK_SIZE - 1))
+ return 0;
+ sz += ALIGNMENT;
+ /* Round to multiple of CHUNK_SIZE. */
+ sz = (sz + CHUNK_SIZE - 1) & ~(size_t)(CHUNK_SIZE - 1);
+ result = get_mmaped(sz);
+ if (result == 0) return 0;
+ /* Hide the size (a multiple of CHUNK_SIZE, so > LOG_MAX_SIZE) in */
+ /* the word just before the user-visible, ALIGNMENT-aligned part. */
+ result += ALIGNMENT;
+ ((AO_t *)result)[-1] = (AO_t)sz;
+ return result;
+}
+
+/* Unmap a region previously returned by AO_malloc_large(). The */
+/* mapping length is recorded in the hidden header word below p. */
+static void
+AO_free_large(char * p)
+{
+ size_t mapped_sz = (size_t)((AO_t *)p)[-1];
+
+ /* Programmer error. Not really async-signal-safe, but ... */
+ if (0 != munmap(p - ALIGNMENT, mapped_sz))
+ abort();
+}
+
+
+#else /* No MMAP */
+
+/* No-op stub: mmap-based heap growth is unavailable on this platform. */
+void
+AO_malloc_enable_mmap(void)
+{
+}
+
+#define get_mmaped(sz) ((char*)0)
+#define AO_malloc_large(sz) ((char*)0)
+#define AO_free_large(p) abort()
+ /* Programmer error. Not really async-signal-safe, but ... */
+
+#endif /* No MMAP */
+
+/* Obtain a CHUNK_SIZE region, ALIGNMENT-aligned: first by bumping a */
+/* pointer through the static initial heap (lock-free CAS loop), then */
+/* via mmap once the initial heap is used up. Returns 0 on failure. */
+static char *
+get_chunk(void)
+{
+ char *my_chunk_ptr;
+
+ for (;;) {
+ char *initial_ptr = (char *)AO_load(&initial_heap_ptr);
+
+ my_chunk_ptr = (char *)(((AO_t)initial_ptr + (ALIGNMENT - 1))
+ & ~(ALIGNMENT - 1));
+ if (initial_ptr != my_chunk_ptr)
+ {
+ /* Align correctly. If this fails, someone else did it for us. */
+ (void)AO_compare_and_swap_acquire(&initial_heap_ptr,
+ (AO_t)initial_ptr, (AO_t)my_chunk_ptr);
+ }
+
+ if (my_chunk_ptr - AO_initial_heap > AO_INITIAL_HEAP_SIZE - CHUNK_SIZE)
+ break;
+ /* Claim [my_chunk_ptr, my_chunk_ptr + CHUNK_SIZE); retry the */
+ /* whole loop if another thread advanced the pointer first. */
+ if (AO_compare_and_swap(&initial_heap_ptr, (AO_t)my_chunk_ptr,
+ (AO_t)(my_chunk_ptr + CHUNK_SIZE))) {
+ return my_chunk_ptr;
+ }
+ }
+
+ /* We failed. The initial heap is used up. */
+ my_chunk_ptr = get_mmaped(CHUNK_SIZE);
+ /* get_mmaped may return 0; 0 trivially passes the alignment check. */
+ assert (!((AO_t)my_chunk_ptr & (ALIGNMENT-1)));
+ return my_chunk_ptr;
+}
+
+/* Object free lists. Ith entry corresponds to objects */
+/* of total size 2**i bytes. */
+AO_stack_t AO_free_list[LOG_MAX_SIZE+1];
+
+/* Break up the chunk, and add it to the object free list for */
+/* the given size. We have exclusive access to chunk. */
+/* Carve the chunk (to which we have exclusive access) into objects */
+/* of total size 2**log_sz bytes and push each one onto the matching */
+/* free list. */
+static void add_chunk_as(void * chunk, unsigned log_sz)
+{
+ size_t obj_sz = (size_t)1 << log_sz;
+ size_t last_ofs = (size_t)CHUNK_SIZE - obj_sz;
+ size_t ofs = ALIGNMENT - sizeof(AO_t);
+
+ assert (CHUNK_SIZE >= obj_sz);
+ /* The first object starts one header word before an ALIGNMENT */
+ /* boundary, so its user-visible part is correctly aligned. */
+ while (ofs <= last_ofs) {
+ AO_stack_push(&AO_free_list[log_sz], (AO_t *)((char *)chunk + ofs));
+ ofs += obj_sz;
+ }
+}
+
+/* Lookup table: msbs[i] is the 1-based position of the most */
+/* significant set bit of i (0 for i == 0), for i in [0, 15]. */
+static const int msbs[16] = {0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4};
+
+/* Return the position of the most significant set bit in the */
+/* argument. */
+/* We follow the conventions of ffs(), i.e. the least */
+/* significant bit is number one. */
+static int msb(size_t s)
+{
+ int result = 0;
+ int v;
+ if ((s & 0xff) != s) {
+ /* The following tricky code ought to be equivalent to */
+ /* "(v = s >> 32) != 0" but suppresses warnings on 32-bit arch's. */
+ if (sizeof(size_t) > 4 && (v = s >> (sizeof(size_t) > 4 ? 32 : 0)) != 0)
+ {
+ s = v;
+ result += 32;
+ }
+ if ((s >> 16) != 0)
+ {
+ s >>= 16;
+ result += 16;
+ }
+ if ((s >> 8) != 0)
+ {
+ s >>= 8;
+ result += 8;
+ }
+ }
+ /* s now fits in 8 bits; finish with a nibble shift plus table. */
+ if (s > 15)
+ {
+ s >>= 4;
+ result += 4;
+ }
+ result += msbs[s];
+ return result;
+}
+
+/* Allocate sz bytes, lock-free. Small requests are served from */
+/* per-size free lists; oversized ones fall back to AO_malloc_large. */
+/* Returns 0 on failure. */
+void *
+AO_malloc(size_t sz)
+{
+ AO_t *result;
+ int log_sz;
+
+ /* Each object carries a hidden AO_t-sized header, so a request */
+ /* fits a small-object list only if sz + sizeof(AO_t) fits in a */
+ /* chunk. The old test (sz > CHUNK_SIZE) let sizes in the range */
+ /* (CHUNK_SIZE - sizeof(AO_t), CHUNK_SIZE] compute */
+ /* log_sz == LOG_MAX_SIZE + 1 below and index past the end of */
+ /* AO_free_list. */
+ if (sz > CHUNK_SIZE - sizeof(AO_t))
+ return AO_malloc_large(sz);
+ /* Round the total size (incl. header) up to a power of two. */
+ log_sz = msb(sz + (sizeof(AO_t) - 1));
+ result = AO_stack_pop(AO_free_list+log_sz);
+ while (0 == result) {
+ void * chunk = get_chunk();
+ if (0 == chunk) return 0;
+ add_chunk_as(chunk, log_sz);
+ result = AO_stack_pop(AO_free_list+log_sz);
+ }
+ /* Record the size class in the header word; AO_free reads it. */
+ *result = log_sz;
+# ifdef AO_TRACE_MALLOC
+ fprintf(stderr, "%x: AO_malloc(%lu) = %p\n",
+ (int)pthread_self(), (unsigned long)sz, result+1);
+# endif
+ return result + 1;
+}
+
+/* Release an object obtained from AO_malloc; AO_free(0) is a no-op. */
+/* Small objects return to their free list; large ones are unmapped. */
+void
+AO_free(void *p)
+{
+ char *base = (char *)p - sizeof(AO_t);
+ /* NOTE(review): base is computed before the null check below; */
+ /* harmless in practice, though pedantically UB for p == 0. */
+ int log_sz;
+
+ if (0 == p) return;
+ /* The header word holds either the binary log of a small */
+ /* object's size, or the full byte size (a multiple of */
+ /* CHUNK_SIZE, hence > LOG_MAX_SIZE) of a large object. */
+ log_sz = (int)(*(AO_t *)base);
+# ifdef AO_TRACE_MALLOC
+ fprintf(stderr, "%x: AO_free(%p sz:%lu)\n", (int)pthread_self(), p,
+ (unsigned long)(log_sz > LOG_MAX_SIZE? log_sz : (1 << log_sz)));
+# endif
+ if (log_sz > LOG_MAX_SIZE)
+ AO_free_large(p);
+ else
+ AO_stack_push(AO_free_list+log_sz, (AO_t *)base);
+}
--- /dev/null
+/*
+ * Copyright (c) 2005 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Almost lock-free malloc implementation based on stack implementation. */
+/* See doc/README_malloc.txt file for detailed usage rules. */
+
+#ifndef AO_MALLOC_H
+#define AO_MALLOC_H
+
+#include "atomic_ops_stack.h"
+
+#include <stddef.h> /* for size_t */
+
+#ifdef AO_STACK_IS_LOCK_FREE
+# define AO_MALLOC_IS_LOCK_FREE
+#endif
+
+void AO_free(void *);
+
+void * AO_malloc(size_t);
+
+/* Allow use of mmap to grow the heap. No-op on some platforms. */
+void AO_malloc_enable_mmap(void);
+
+#endif /* !AO_MALLOC_H */
--- /dev/null
+/*
+ * Copyright (c) 2005 Hewlett-Packard Development Company, L.P.
+ *
+ * This file may be redistributed and/or modified under the
+ * terms of the GNU General Public License as published by the Free Software
+ * Foundation; either version 2, or (at your option) any later version.
+ *
+ * It is distributed in the hope that it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License in the
+ * file COPYING for more details.
+ */
+
+#if defined(HAVE_CONFIG_H)
+# include "config.h"
+#endif
+
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+
+#define AO_REQUIRE_CAS
+#include "atomic_ops_stack.h"
+
+#ifdef AO_USE_ALMOST_LOCK_FREE
+
+ void AO_pause(int); /* defined in atomic_ops.c */
+
+/* LIFO linked lists based on compare-and-swap. We need to avoid */
+/* the case of a node deletion and reinsertion while I'm deleting */
+/* it, since that may cause my CAS to succeed even though the next */
+/* pointer is now wrong. Our solution is not fully lock-free, but it */
+/* is good enough for signal handlers, provided we have a suitably low */
+/* bound on the number of recursive signal handler reentries. */
+/* A list consists of a first pointer and a blacklist */
+/* of pointer values that are currently being removed. No list element */
+/* on the blacklist may be inserted. If we would otherwise do so, we */
+/* are allowed to insert a variant that differs only in the least */
+/* significant, ignored, bits. If the list is full, we wait. */
+
+/* Crucial observation: A particular padded pointer x (i.e. pointer */
+/* plus arbitrary low order bits) can never be newly inserted into */
+/* a list while it's in the corresponding auxiliary data structure. */
+
+/* The second argument is a pointer to the link field of the element */
+/* to be inserted. */
+/* Both list headers and link fields contain "perturbed" pointers, i.e. */
+/* pointers with extra bits "or"ed into the low order bits. */
+void
+AO_stack_push_explicit_aux_release(volatile AO_t *list, AO_t *x,
+ AO_stack_aux *a)
+{
+ AO_t x_bits = (AO_t)x;
+ AO_t next;
+
+ /* No deletions of x can start here, since x is not currently in the */
+ /* list. */
+ retry:
+# if AO_BL_SIZE == 2
+ {
+ /* Start all loads as close to concurrently as possible. */
+ AO_t entry1 = AO_load(a -> AO_stack_bl);
+ AO_t entry2 = AO_load(a -> AO_stack_bl + 1);
+ if (entry1 == x_bits || entry2 == x_bits)
+ {
+ /* Entry is currently being removed. Change it a little. */
+ ++x_bits;
+ if ((x_bits & AO_BIT_MASK) == 0)
+ /* Version count overflowed; */
+ /* EXTREMELY unlikely, but possible. */
+ x_bits = (AO_t)x;
+ goto retry;
+ }
+ }
+# else
+ {
+ /* General case: scan every blacklist slot for x_bits. */
+ int i;
+ for (i = 0; i < AO_BL_SIZE; ++i)
+ {
+ if (AO_load(a -> AO_stack_bl + i) == x_bits)
+ {
+ /* Entry is currently being removed. Change it a little. */
+ ++x_bits;
+ if ((x_bits & AO_BIT_MASK) == 0)
+ /* Version count overflowed; */
+ /* EXTREMELY unlikely, but possible. */
+ x_bits = (AO_t)x;
+ goto retry;
+ }
+ }
+ }
+# endif
+ /* x_bits is not currently being deleted */
+ /* Standard lock-free push: link x to the current head, then CAS */
+ /* the head from that value to x_bits. */
+ do
+ {
+ next = AO_load(list);
+ *x = next;
+ }
+ while (AO_EXPECT_FALSE(!AO_compare_and_swap_release(list, next, x_bits)));
+}
+
+/*
+ * I concluded experimentally that checking a value first before
+ * performing a compare-and-swap is usually beneficial on X86, but
+ * slows things down appreciably with contention on Itanium.
+ * Since the Itanium behavior makes more sense to me (more cache line
+ * movement unless we're mostly reading, but back-off should guard
+ * against that), we take Itanium as the default. Measurements on
+ * other multiprocessor architectures would be useful. (On a uniprocessor,
+ * the initial check is almost certainly a very small loss.) - HB
+ */
+#ifdef __i386__
+# define PRECHECK(a) (a) == 0 &&
+#else
+# define PRECHECK(a)
+#endif
+
+/* Pop the first element of the list, blacklisting its padded pointer */
+/* in a while the removal is in progress so that it cannot be */
+/* concurrently reinserted. Returns 0 if the list is empty. */
+AO_t *
+AO_stack_pop_explicit_aux_acquire(volatile AO_t *list, AO_stack_aux * a)
+{
+ unsigned i;
+ int j = 0; /* back-off count passed to AO_pause */
+ AO_t first;
+ AO_t * first_ptr;
+ AO_t next;
+
+ retry:
+ first = AO_load(list);
+ if (0 == first) return 0;
+ /* Insert first into aux black list. */
+ /* This may spin if more than AO_BL_SIZE removals using auxiliary */
+ /* structure a are currently in progress. */
+ for (i = 0; ; )
+ {
+ if (PRECHECK(a -> AO_stack_bl[i])
+ AO_compare_and_swap_acquire(a->AO_stack_bl+i, 0, first))
+ break;
+ ++i;
+ if ( i >= AO_BL_SIZE )
+ {
+ i = 0;
+ AO_pause(++j);
+ }
+ }
+ assert(i < AO_BL_SIZE);
+ assert(a -> AO_stack_bl[i] == first);
+ /* First is on the auxiliary black list. It may be removed by */
+ /* another thread before we get to it, but a new insertion of x */
+ /* cannot be started here. */
+ /* Only we can remove it from the black list. */
+ /* We need to make sure that first is still the first entry on the */
+ /* list. Otherwise it's possible that a reinsertion of it was */
+ /* already started before we added the black list entry. */
+# if defined(__alpha__) && (__GNUC__ == 4)
+ if (first != AO_load(list))
+ /* Workaround __builtin_expect bug found in */
+ /* gcc-4.6.3/alpha causing test_stack failure. */
+# else
+ if (AO_EXPECT_FALSE(first != AO_load(list)))
+# endif
+ {
+ /* Head changed; release our blacklist slot and start over. */
+ AO_store_release(a->AO_stack_bl+i, 0);
+ goto retry;
+ }
+ first_ptr = AO_REAL_NEXT_PTR(first);
+ next = AO_load(first_ptr);
+# if defined(__alpha__) && (__GNUC__ == 4)
+ if (!AO_compare_and_swap_release(list, first, next))
+# else
+ if (AO_EXPECT_FALSE(!AO_compare_and_swap_release(list, first, next)))
+# endif
+ {
+ /* Lost the race to unlink first; release the slot and retry. */
+ AO_store_release(a->AO_stack_bl+i, 0);
+ goto retry;
+ }
+ assert(*list != first);
+ /* Since we never insert an entry on the black list, this cannot have */
+ /* succeeded unless first remained on the list while we were running. */
+ /* Thus its next link cannot have changed out from under us, and we */
+ /* removed exactly one entry and preserved the rest of the list. */
+ /* Note that it is quite possible that an additional entry was */
+ /* inserted and removed while we were running; this is OK since the */
+ /* part of the list following first must have remained unchanged, and */
+ /* first must again have been at the head of the list when the */
+ /* compare_and_swap succeeded. */
+ AO_store_release(a->AO_stack_bl+i, 0);
+ return first_ptr;
+}
+
+#else /* ! USE_ALMOST_LOCK_FREE */
+
+/* Better names for fields in AO_stack_t */
+#define ptr AO_val2
+#define version AO_val1
+
+#if defined(AO_HAVE_compare_double_and_swap_double)
+
+/* Push element: link it to the current head, then swing the head */
+/* pointer with a single-width CAS (see the note below on why the */
+/* narrow CAS is sufficient here). */
+void AO_stack_push_release(AO_stack_t *list, AO_t *element)
+{
+ AO_t next;
+
+ do {
+ next = AO_load(&(list -> ptr));
+ *element = next;
+ } while (AO_EXPECT_FALSE(!AO_compare_and_swap_release(&(list -> ptr),
+ next, (AO_t)element)));
+ /* This uses a narrow CAS here, an old optimization suggested */
+ /* by Treiber. Pop is still safe, since we run into the ABA */
+ /* problem only if there were both intervening "pop"s and "push"es. */
+ /* In that case we still see a change in the version number. */
+}
+
+/* Pop the head element, or return 0 if the stack is empty. The */
+/* version word, incremented on every successful pop, makes the */
+/* double-width CAS fail if the list changed underneath us (ABA). */
+AO_t *AO_stack_pop_acquire(AO_stack_t *list)
+{
+# ifdef __clang__
+ AO_t *volatile cptr;
+ /* Use volatile to workaround a bug in */
+ /* clang-1.1/x86 causing test_stack failure. */
+# else
+ AO_t *cptr;
+# endif
+ AO_t next;
+ AO_t cversion;
+
+ do {
+ /* Version must be loaded first. */
+ cversion = AO_load_acquire(&(list -> version));
+ cptr = (AO_t *)AO_load(&(list -> ptr));
+ if (cptr == 0) return 0;
+ next = *cptr;
+ } while (AO_EXPECT_FALSE(!AO_compare_double_and_swap_double_release(list,
+ cversion, (AO_t)cptr,
+ cversion+1, (AO_t)next)));
+ return cptr;
+}
+
+
+#elif defined(AO_HAVE_compare_and_swap_double)
+
+/* Needed for future IA64 processors. No current clients? */
+
+#error Untested! Probably doesnt work.
+
+/* We have a wide CAS, but only does an AO_t-wide comparison. */
+/* We can't use the Treiber optimization, since we only check */
+/* for an unchanged version number, not an unchanged pointer. */
+/* Push using a double-wide CAS that compares only the version word. */
+/* (This whole branch is behind the #error above and is untested.) */
+void AO_stack_push_release(AO_stack_t *list, AO_t *element)
+{
+ AO_t version;
+ AO_t next_ptr;
+
+ do {
+ /* Again version must be loaded first, for different reason. */
+ version = AO_load_acquire(&(list -> version));
+ next_ptr = AO_load(&(list -> ptr));
+ *element = next_ptr;
+ } while (!AO_compare_and_swap_double_release(
+ list, version,
+ version+1, (AO_t) element));
+}
+
+/* Pop via the version-checked double-wide CAS; returns 0 if empty. */
+/* (This whole branch is behind the #error above and is untested.) */
+AO_t *AO_stack_pop_acquire(AO_stack_t *list)
+{
+ AO_t *cptr;
+ AO_t next;
+ AO_t cversion;
+
+ do {
+ /* Version must be loaded before the pointer. */
+ cversion = AO_load_acquire(&(list -> version));
+ cptr = (AO_t *)AO_load(&(list -> ptr));
+ if (cptr == 0) return 0;
+ next = *cptr;
+ } while (!AO_compare_double_and_swap_double_release
+ (list, cversion, (AO_t) cptr, cversion+1, next));
+ return cptr;
+}
+
+
+#endif /* AO_HAVE_compare_and_swap_double */
+
+#endif /* ! USE_ALMOST_LOCK_FREE */
--- /dev/null
+/*
+ * The implementation of the routines described here is covered by the GPL.
+ * This header file is covered by the following license:
+ */
+
+/*
+ * Copyright (c) 2005 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Almost lock-free LIFO linked lists (linked stacks). */
+#ifndef AO_STACK_H
+#define AO_STACK_H
+
+#include "atomic_ops.h"
+
+#if !defined(AO_HAVE_compare_double_and_swap_double) \
+ && !defined(AO_HAVE_compare_double_and_swap) \
+ && defined(AO_HAVE_compare_and_swap)
+# define AO_USE_ALMOST_LOCK_FREE
+#else
+ /* If we have no compare-and-swap operation defined, we assume */
+ /* that we will actually be using CAS emulation. If we do that, */
+ /* it's cheaper to use the version-based implementation. */
+# define AO_STACK_IS_LOCK_FREE
+#endif
+
+/*
+ * These are not guaranteed to be completely lock-free.
+ * List insertion may spin under extremely unlikely conditions.
+ * It cannot deadlock due to recursive reentry unless AO_list_remove
+ * is called while at least AO_BL_SIZE activations of
+ * AO_list_remove are currently active in the same thread, i.e.
+ * we must have at least AO_BL_SIZE recursive signal handler
+ * invocations.
+ *
+ * All operations take an AO_list_aux argument. It is safe to
+ * share a single AO_list_aux structure among all lists, but that
+ * may increase contention. Any given list must always be accessed
+ * with the same AO_list_aux structure.
+ *
+ * We make some machine-dependent assumptions:
+ * - We have a compare-and-swap operation.
+ * - At least _AO_N_BITS low order bits in pointers are
+ * zero and normally unused.
+ * - size_t and pointers have the same size.
+ *
+ * We do use a fully lock-free implementation if double-width
+ * compare-and-swap operations are available.
+ */
+
+#ifdef AO_USE_ALMOST_LOCK_FREE
+/* The number of low order pointer bits we can use for a small */
+/* version number. */
+# if defined(__LP64__) || defined(_LP64) || defined(_WIN64)
+ /* WIN64 isn't really supported yet. */
+# define AO_N_BITS 3
+# else
+# define AO_N_BITS 2
+# endif
+
+# define AO_BIT_MASK ((1 << AO_N_BITS) - 1)
+/*
+ * AO_stack_aux should be treated as opaque.
+ * It is fully defined here, so it can be allocated, and to facilitate
+ * debugging.
+ */
+#ifndef AO_BL_SIZE
+# define AO_BL_SIZE 2
+#endif
+
+#if AO_BL_SIZE > (1 << AO_N_BITS)
+# error AO_BL_SIZE too big
+#endif
+
+typedef struct AO__stack_aux {
+ volatile AO_t AO_stack_bl[AO_BL_SIZE];
+} AO_stack_aux;
+
+/* The stack implementation knows only about the location of */
+/* link fields in nodes, and nothing about the rest of the */
+/* stack elements. Link fields hold an AO_t, which is not */
+/* necessarily a real pointer. This converts the AO_t to a */
+/* real (AO_t *) which is either o, or points at the link */
+/* field in the next node. */
+#define AO_REAL_NEXT_PTR(x) (AO_t *)((x) & ~AO_BIT_MASK)
+
+/* The following two routines should not normally be used directly. */
+/* We make them visible here for the rare cases in which it makes sense */
+/* to share an AO_stack_aux between stacks. */
+void
+AO_stack_push_explicit_aux_release(volatile AO_t *list, AO_t *x,
+ AO_stack_aux *);
+
+AO_t *
+AO_stack_pop_explicit_aux_acquire(volatile AO_t *list, AO_stack_aux *);
+
+/* And now AO_stack_t for the real interface: */
+
+typedef struct AO__stack {
+ volatile AO_t AO_ptr;
+ AO_stack_aux AO_aux;
+} AO_stack_t;
+
+#define AO_STACK_INITIALIZER {0,{{0}}}
+
+/* Initialize list to the empty state: head pointer and every */
+/* blacklist slot zeroed. */
+AO_INLINE void AO_stack_init(AO_stack_t *list)
+{
+# if AO_BL_SIZE == 2
+ list -> AO_aux.AO_stack_bl[0] = 0;
+ list -> AO_aux.AO_stack_bl[1] = 0;
+# else
+ int i;
+ for (i = 0; i < AO_BL_SIZE; ++i)
+ list -> AO_aux.AO_stack_bl[i] = 0;
+# endif
+ list -> AO_ptr = 0;
+}
+
+/* Convert an AO_stack_t to a pointer to the link field in */
+/* the first element. */
+#define AO_REAL_HEAD_PTR(x) AO_REAL_NEXT_PTR((x).AO_ptr)
+
+#define AO_stack_push_release(l, e) \
+ AO_stack_push_explicit_aux_release(&((l)->AO_ptr), e, &((l)->AO_aux))
+#define AO_HAVE_stack_push_release
+
+#define AO_stack_pop_acquire(l) \
+ AO_stack_pop_explicit_aux_acquire(&((l)->AO_ptr), &((l)->AO_aux))
+#define AO_HAVE_stack_pop_acquire
+
+# else /* Use fully non-blocking data structure, wide CAS */
+
+#ifndef AO_HAVE_double_t
+ /* Can happen if we're using CAS emulation, since we don't want to */
+ /* force that here, in case other atomic_ops clients don't want it. */
+# include "atomic_ops/sysdeps/standard_ao_double_t.h"
+#endif
+
+typedef volatile AO_double_t AO_stack_t;
+/* AO_val1 is version, AO_val2 is pointer. */
+
+#define AO_STACK_INITIALIZER AO_DOUBLE_T_INITIALIZER
+
+/* Initialize list to the empty state (version 0, null pointer). */
+AO_INLINE void AO_stack_init(AO_stack_t *list)
+{
+ list -> AO_val1 = 0;
+ list -> AO_val2 = 0;
+}
+
+#define AO_REAL_HEAD_PTR(x) (AO_t *)((x).AO_val2)
+#define AO_REAL_NEXT_PTR(x) (AO_t *)(x)
+
+void AO_stack_push_release(AO_stack_t *list, AO_t *new_element);
+#define AO_HAVE_stack_push_release
+AO_t * AO_stack_pop_acquire(AO_stack_t *list);
+#define AO_HAVE_stack_pop_acquire
+
+#endif /* Wide CAS case */
+
+#if defined(AO_HAVE_stack_push_release) && !defined(AO_HAVE_stack_push)
+# define AO_stack_push(l, e) AO_stack_push_release(l, e)
+# define AO_HAVE_stack_push
+#endif
+
+#if defined(AO_HAVE_stack_pop_acquire) && !defined(AO_HAVE_stack_pop)
+# define AO_stack_pop(l) AO_stack_pop_acquire(l)
+# define AO_HAVE_stack_pop
+#endif
+
+#endif /* !AO_STACK_H */
--- /dev/null
+/*
+ * Include the appropriate system-dependent assembly file, if any.
+ * This is used only if the platform supports neither inline assembly
+ * code, nor appropriate compiler intrinsics.
+ */
+
+#if !defined(__GNUC__) && (defined(sparc) || defined(__sparc))
+# include "atomic_ops/sysdeps/sunc/sparc.S"
+#endif
--- /dev/null
+EXTRA_DIST=test_atomic_include.template list_atomic.template run_parallel.h \
+ test_atomic_include.h list_atomic.c
+# We distribute test_atomic_include.h and list_atomic.c, since it is hard
+# to regenerate them on Windows without sed.
+
+BUILT_SOURCES = test_atomic_include.h list_atomic.i list_atomic.o
+CLEANFILES = list_atomic.i list_atomic.o
+
+AM_CPPFLAGS = \
+ -I$(top_builddir)/src -I$(top_srcdir)/src \
+ -I$(top_builddir)/tests -I$(top_srcdir)/tests
+
+if HAVE_PTHREAD_H
+TESTS=test_atomic test_atomic_pthreads test_stack test_malloc
+test_atomic_pthreads_SOURCES=$(test_atomic_SOURCES)
+test_atomic_pthreads_CPPFLAGS=-DAO_USE_PTHREAD_DEFS $(AM_CPPFLAGS)
+test_atomic_pthreads_LDADD=$(test_atomic_LDADD)
+else
+TESTS=test_atomic test_stack test_malloc
+endif
+
+check_PROGRAMS=$(TESTS)
+
+test_atomic_SOURCES=test_atomic.c
+test_atomic_LDADD = $(THREADDLLIBS) $(top_builddir)/src/libatomic_ops.la
+
+test_stack_SOURCES=test_stack.c
+test_stack_LDADD = $(THREADDLLIBS) \
+ $(top_builddir)/src/libatomic_ops_gpl.la \
+ $(top_builddir)/src/libatomic_ops.la
+
+test_malloc_SOURCES=test_malloc.c
+test_malloc_LDADD = $(THREADDLLIBS) \
+ $(top_builddir)/src/libatomic_ops_gpl.la \
+ $(top_builddir)/src/libatomic_ops.la
+
+# Generate test_atomic_include.h by instantiating the template once per
+# memory-ordering suffix (plain, _release, _acquire, ..., _acquire_read).
+test_atomic_include.h: test_atomic_include.template
+	mkdir -p `dirname $@`
+	sed -e s:XX::g $? > $@
+	sed -e s:XX:_release:g $? >> $@
+	sed -e s:XX:_acquire:g $? >> $@
+	sed -e s:XX:_read:g $? >> $@
+	sed -e s:XX:_write:g $? >> $@
+	sed -e s:XX:_full:g $? >> $@
+	sed -e s:XX:_release_write:g $? >> $@
+	sed -e s:XX:_acquire_read:g $? >> $@
+
+list_atomic.c: list_atomic.template
+ mkdir -p `dirname $@`
+ echo "#include \"atomic_ops.h\"" > $@
+ sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g -e s:XX::g $? >> $@
+ sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g -e s:XX:_release:g $? >> $@
+ sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g -e s:XX:_acquire:g $? >> $@
+ sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g -e s:XX:_read:g $? >> $@
+ sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g -e s:XX:_write:g $? >> $@
+ sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g -e s:XX:_full:g $? >> $@
+ sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g -e s:XX:_release_write:g $? >> $@
+ sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g -e s:XX:_acquire_read:g $? >> $@
+ sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g -e s:XX:_dd_acquire_read:g $? >> $@
+ sed -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g -e s:XX::g $? >> $@
+ sed -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g -e s:XX:_release:g $? >> $@
+ sed -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g -e s:XX:_acquire:g $? >> $@
+ sed -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g -e s:XX:_read:g $? >> $@
+ sed -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g -e s:XX:_write:g $? >> $@
+ sed -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g -e s:XX:_full:g $? >> $@
+ sed -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g -e s:XX:_release_write:g $? >> $@
+ sed -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g -e s:XX:_acquire_read:g $? >> $@
+ sed -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g -e s:XX:_dd_acquire_read:g $? >> $@
+ sed -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g -e s:XX::g $? >> $@
+ sed -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g -e s:XX:_release:g $? >> $@
+ sed -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g -e s:XX:_acquire:g $? >> $@
+ sed -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g -e s:XX:_read:g $? >> $@
+ sed -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g -e s:XX:_write:g $? >> $@
+ sed -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g -e s:XX:_full:g $? >> $@
+ sed -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g -e s:XX:_release_write:g $? >> $@
+ sed -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g -e s:XX:_acquire_read:g $? >> $@
+ sed -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g -e s:XX:_dd_acquire_read:g $? >> $@
+ sed -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g -e s:XX::g $? >> $@
+ sed -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g -e s:XX:_release:g $? >> $@
+ sed -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g -e s:XX:_acquire:g $? >> $@
+ sed -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g -e s:XX:_read:g $? >> $@
+ sed -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g -e s:XX:_write:g $? >> $@
+ sed -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g -e s:XX:_full:g $? >> $@
+ sed -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g -e s:XX:_release_write:g $? >> $@
+ sed -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g -e s:XX:_acquire_read:g $? >> $@
+ sed -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g -e s:XX:_dd_acquire_read:g $? >> $@
+ sed -e s:XSIZE:double:g -e s:XCTYPE:AO_double_t:g -e s:XX::g $? >> $@
+ sed -e s:XSIZE:double:g -e s:XCTYPE:AO_double_t:g -e s:XX:_release:g $? >> $@
+ sed -e s:XSIZE:double:g -e s:XCTYPE:AO_double_t:g -e s:XX:_acquire:g $? >> $@
+ sed -e s:XSIZE:double:g -e s:XCTYPE:AO_double_t:g -e s:XX:_read:g $? >> $@
+ sed -e s:XSIZE:double:g -e s:XCTYPE:AO_double_t:g -e s:XX:_write:g $? >> $@
+ sed -e s:XSIZE:double:g -e s:XCTYPE:AO_double_t:g -e s:XX:_full:g $? >> $@
+ sed -e s:XSIZE:double:g -e s:XCTYPE:AO_double_t:g -e s:XX:_release_write:g $? >> $@
+ sed -e s:XSIZE:double:g -e s:XCTYPE:AO_double_t:g -e s:XX:_acquire_read:g $? >> $@
+ sed -e s:XSIZE:double:g -e s:XCTYPE:AO_double_t:g -e s:XX:_dd_acquire_read:g $? >> $@
+
+# Preprocess list_atomic.c so the generated primitive expansions can be
+# inspected by hand (see the comment at the top of list_atomic.template):
+list_atomic.i: list_atomic.c
+ mkdir -p `dirname $@`
+ $(COMPILE) $? -E > $@
+
+# Verify list_atomic.c syntax:
+list_atomic.o: list_atomic.c
+ $(COMPILE) -c -o $@ $?
--- /dev/null
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * This file is covered by the GNU general public license, version 2.
+ * see COPYING for details.
+ */
+
+/* This generates a compilable program. But it is really meant to */
+/* be used only with cc -E, to inspect the expansions generated by */
+/* primitives. */
+
+/* The result will not link or run. */
+
+/* Template body: the XSIZE, XCTYPE and XX markers are textually       */
+/* replaced by the sed commands in tests/Makefile.am to instantiate    */
+/* one function per operand size and barrier suffix.  Each primitive   */
+/* call is preceded by a discarded string literal so the expansion is  */
+/* easy to locate in the cc -E output.                                 */
+void XSIZE_list_atomicXX(void)
+{
+# if defined(AO_HAVE_XSIZE_loadXX) || defined(AO_HAVE_XSIZE_storeXX) \
+ || defined(AO_HAVE_XSIZE_fetch_and_addXX) \
+ || defined(AO_HAVE_XSIZE_fetch_and_add1XX) \
+ || defined(AO_HAVE_XSIZE_andXX) \
+ || defined(AO_HAVE_XSIZE_compare_and_swapXX) \
+ || defined(AO_HAVE_XSIZE_fetch_compare_and_swapXX)
+ static volatile XCTYPE val /* = 0 */;
+# endif
+# if defined(AO_HAVE_XSIZE_compare_and_swapXX) \
+ || defined(AO_HAVE_XSIZE_fetch_compare_and_swapXX)
+ static XCTYPE oldval /* = 0 */;
+# endif
+# if defined(AO_HAVE_XSIZE_storeXX) \
+ || defined(AO_HAVE_XSIZE_compare_and_swapXX) \
+ || defined(AO_HAVE_XSIZE_fetch_compare_and_swapXX)
+ static XCTYPE newval /* = 0 */;
+# endif
+# if defined(AO_HAVE_test_and_setXX)
+ AO_TS_t ts;
+# endif
+# if defined(AO_HAVE_XSIZE_fetch_and_addXX) || defined(AO_HAVE_XSIZE_andXX) \
+ || defined(AO_HAVE_XSIZE_orXX) || defined(AO_HAVE_XSIZE_xorXX)
+ static XCTYPE incr /* = 0 */;
+# endif
+
+# if defined(AO_HAVE_nopXX)
+ (void)"AO_nopXX(): ";
+ AO_nopXX();
+# else
+ (void)"No AO_nopXX";
+# endif
+
+# ifdef AO_HAVE_XSIZE_loadXX
+ (void)"AO_XSIZE_loadXX(&val):";
+ AO_XSIZE_loadXX(&val);
+# else
+ (void)"No AO_XSIZE_loadXX";
+# endif
+# ifdef AO_HAVE_XSIZE_storeXX
+ (void)"AO_XSIZE_storeXX(&val, newval):";
+ AO_XSIZE_storeXX(&val, newval);
+# else
+ (void)"No AO_XSIZE_storeXX";
+# endif
+# ifdef AO_HAVE_XSIZE_fetch_and_addXX
+ (void)"AO_XSIZE_fetch_and_addXX(&val, incr):";
+ AO_XSIZE_fetch_and_addXX(&val, incr);
+# else
+ (void)"No AO_XSIZE_fetch_and_addXX";
+# endif
+# ifdef AO_HAVE_XSIZE_fetch_and_add1XX
+ (void)"AO_XSIZE_fetch_and_add1XX(&val):";
+ AO_XSIZE_fetch_and_add1XX(&val);
+# else
+ (void)"No AO_XSIZE_fetch_and_add1XX";
+# endif
+# ifdef AO_HAVE_XSIZE_fetch_and_sub1XX
+ (void)"AO_XSIZE_fetch_and_sub1XX(&val):";
+ AO_XSIZE_fetch_and_sub1XX(&val);
+# else
+ (void)"No AO_XSIZE_fetch_and_sub1XX";
+# endif
+# ifdef AO_HAVE_XSIZE_andXX
+ (void)"AO_XSIZE_andXX(&val, incr):";
+ AO_XSIZE_andXX(&val, incr);
+# else
+ (void)"No AO_XSIZE_andXX";
+# endif
+# ifdef AO_HAVE_XSIZE_orXX
+ (void)"AO_XSIZE_orXX(&val, incr):";
+ AO_XSIZE_orXX(&val, incr);
+# else
+ (void)"No AO_XSIZE_orXX";
+# endif
+# ifdef AO_HAVE_XSIZE_xorXX
+ (void)"AO_XSIZE_xorXX(&val, incr):";
+ AO_XSIZE_xorXX(&val, incr);
+# else
+ (void)"No AO_XSIZE_xorXX";
+# endif
+# ifdef AO_HAVE_XSIZE_compare_and_swapXX
+ (void)"AO_XSIZE_compare_and_swapXX(&val, oldval, newval):";
+ AO_XSIZE_compare_and_swapXX(&val, oldval, newval);
+# else
+ (void)"No AO_XSIZE_compare_and_swapXX";
+# endif
+ /* TODO: Add AO_compare_double_and_swap_doubleXX */
+ /* TODO: Add AO_compare_and_swap_doubleXX */
+# ifdef AO_HAVE_XSIZE_fetch_compare_and_swapXX
+ (void)"AO_XSIZE_fetch_compare_and_swapXX(&val, oldval, newval):";
+ AO_XSIZE_fetch_compare_and_swapXX(&val, oldval, newval);
+# else
+ (void)"No AO_XSIZE_fetch_compare_and_swapXX";
+# endif
+
+# if defined(AO_HAVE_test_and_setXX)
+ (void)"AO_test_and_setXX(&ts):";
+ AO_test_and_setXX(&ts);
+# else
+ (void)"No AO_test_and_setXX";
+# endif
+}
--- /dev/null
+/*
+ * Copyright (c) 2003-2005 Hewlett-Packard Development Company, L.P.
+ *
+ * This file is covered by the GNU general public license, version 2.
+ * see COPYING for details.
+ */
+
+#if defined(_MSC_VER) || \
+ defined(_WIN32) && !defined(__CYGWIN32__) && !defined(__CYGWIN__) || \
+ defined(_WIN32_WINCE)
+# define USE_WINTHREADS
+#elif defined(__vxworks)
+# define USE_VXTHREADS
+#else
+# define USE_PTHREADS
+#endif
+
+#include <stdlib.h>
+#include <stdio.h>
+
+#ifdef USE_PTHREADS
+# include <pthread.h>
+#endif
+
+#ifdef USE_VXTHREADS
+# include <vxworks.h>
+# include <taskLib.h>
+#endif
+
+#ifdef USE_WINTHREADS
+# include <windows.h>
+#endif
+
+#include "atomic_ops.h"
+
+#if (defined(_WIN32_WCE) || defined(__MINGW32CE__)) && !defined(abort)
+# define abort() _exit(-1) /* there is no abort() in WinCE */
+#endif
+
+#ifndef _WIN64
+# define AO_PTRDIFF_T long
+#elif defined(__int64)
+# define AO_PTRDIFF_T __int64
+#else
+# define AO_PTRDIFF_T long long
+#endif
+
+typedef void * (* thr_func)(void *);
+
+typedef int (* test_func)(void); /* Returns != 0 on success */
+
+void * run_parallel(int nthreads, thr_func f1, test_func t, const char *name);
+
+#ifdef USE_PTHREADS
+/* POSIX-threads implementation: spawn nthreads copies of f1 (the      */
+/* thread index is passed as the thread argument), join them all, then */
+/* run the check function t.  Aborts the process on any pthread API    */
+/* failure or if t reports failure; returns 0 on success.              */
+/* NOTE(review): the index is smuggled through void* via               */
+/* (void *)(long)i; thread bodies recover it with the matching cast.   */
+void * run_parallel(int nthreads, thr_func f1, test_func t, const char *name)
+{
+ pthread_attr_t attr;
+ pthread_t thr[100];
+ int i;
+ int code;
+
+ printf("Testing %s\n", name);
+ if (nthreads > 100)
+ {
+ fprintf(stderr, "run_parallel: requested too many threads\n");
+ abort();
+ }
+
+# ifdef _HPUX_SOURCE
+ /* Default stack size is too small, especially with the 64 bit ABI */
+ /* Increase it. */
+ if (pthread_default_stacksize_np(1024*1024, 0) != 0)
+ {
+ fprintf(stderr, "pthread_default_stacksize_np failed. "
+ "OK after first call.\n");
+ }
+# endif
+
+ pthread_attr_init(&attr);
+
+ for (i = 0; i < nthreads; ++i)
+ {
+ if ((code = pthread_create(thr + i, &attr, f1, (void *)(long)i)) != 0)
+ {
+ fprintf(stderr, "pthread_create returned %d, thread %d\n", code, i);
+ abort();
+ }
+ }
+ for (i = 0; i < nthreads; ++i)
+ {
+ if ((code = pthread_join(thr[i], NULL)) != 0)
+ {
+ fprintf(stderr, "pthread_join returned %d, thread %d\n", code, i);
+ abort();
+ }
+ }
+ if (t())
+ {
+ printf("Succeeded\n");
+ }
+ else
+ {
+ fprintf(stderr, "Failed\n");
+ abort();
+ }
+ return 0;
+}
+#endif /* USE_PTHREADS */
+
+#ifdef USE_VXTHREADS
+/* VxWorks implementation: spawn nthreads tasks running f1, poll until */
+/* every task has exited (taskIdVerify), then run the check function   */
+/* t.  On any failure the current task is suspended instead of         */
+/* aborting.                                                           */
+/* NOTE(review): errno is read below but no <errno.h> include is       */
+/* visible in this file - presumably provided by the VxWorks headers;  */
+/* confirm.                                                            */
+void * run_parallel(int nthreads, thr_func f1, test_func t, const char *name)
+{
+ int thr[100];
+ int i;
+
+ printf("Testing %s\n", name);
+ if (nthreads > 100)
+ {
+ fprintf(stderr, "run_parallel: requested too many threads\n");
+ taskSuspend(0);
+ }
+
+ for (i = 0; i < nthreads; ++i)
+ {
+ thr[i] = taskSpawn((char*) name, 180, 0, 32768, (FUNCPTR) f1, i,
+ 1, 2, 3, 4, 5, 6, 7, 8, 9);
+ if (thr[i] == ERROR)
+ {
+ fprintf(stderr, "taskSpawn failed with %d, thread %d\n",
+ errno, i);
+ taskSuspend(0);
+ }
+ }
+ for (i = 0; i < nthreads; ++i)
+ {
+ while (taskIdVerify(thr[i]) == OK)
+ taskDelay(60);
+ }
+ if (t())
+ {
+ printf("Succeeded\n");
+ }
+ else
+ {
+ fprintf(stderr, "Failed\n");
+ taskSuspend(0);
+ }
+ return 0;
+}
+#endif /* USE_VXTHREADS */
+
+#ifdef USE_WINTHREADS
+
+/* Per-thread bootstrap data: the user's pthread-style function and    */
+/* its integer argument (the thread index).                            */
+struct tramp_args {
+ thr_func fn;
+ long arg;
+};
+
+/* Win32 thread entry point: unpack tramp_args and call the user's     */
+/* pthread-style function, narrowing its pointer result to a DWORD     */
+/* exit code via the pointer-sized AO_PTRDIFF_T intermediate.          */
+DWORD WINAPI tramp(LPVOID param)
+{
+ struct tramp_args *args = (struct tramp_args *)param;
+
+ return (DWORD)(AO_PTRDIFF_T)(*args->fn)((LPVOID)(AO_PTRDIFF_T)args->arg);
+}
+
+/* Win32 implementation: spawn nthreads threads via CreateThread (each */
+/* bounced through tramp with its index in args[i]), wait for all of   */
+/* them, then run the check function t.  Aborts on API failure or if   */
+/* t reports failure; returns 0 on success.                            */
+void * run_parallel(int nthreads, thr_func f1, test_func t, const char *name)
+{
+ HANDLE thr[100];
+ struct tramp_args args[100];
+ int i;
+ DWORD code;
+
+ printf("Testing %s\n", name);
+ if (nthreads > 100)
+ {
+ fprintf(stderr, "run_parallel: requested too many threads\n");
+ abort();
+ }
+
+ for (i = 0; i < nthreads; ++i)
+ {
+ args[i].fn = f1;
+ args[i].arg = i;
+ if ((thr[i] = CreateThread(NULL, 0, tramp, (LPVOID)(args+i), 0, NULL))
+ == NULL)
+ {
+ fprintf(stderr, "CreateThread failed with %lu, thread %d\n",
+ (unsigned long)GetLastError(), i);
+ abort();
+ }
+ }
+ for (i = 0; i < nthreads; ++i)
+ {
+ if ((code = WaitForSingleObject(thr[i], INFINITE)) != WAIT_OBJECT_0)
+ {
+ fprintf(stderr, "WaitForSingleObject returned %lu, thread %d\n",
+ (unsigned long)code, i);
+ abort();
+ }
+ }
+ if (t())
+ {
+ printf("Succeeded\n");
+ }
+ else
+ {
+ fprintf(stderr, "Failed\n");
+ abort();
+ }
+ return 0;
+}
+#endif /* USE_WINTHREADS */
--- /dev/null
+/*
+ * Copyright (c) 2003-2005 Hewlett-Packard Development Company, L.P.
+ *
+ * This file may be redistributed and/or modified under the
+ * terms of the GNU General Public License as published by the Free Software
+ * Foundation; either version 2, or (at your option) any later version.
+ *
+ * It is distributed in the hope that it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License in the
+ * file COPYING for more details.
+ */
+
+#if defined(HAVE_CONFIG_H)
+# include "config.h"
+#endif
+
+#if defined(AO_NO_PTHREADS) && defined(AO_USE_PTHREAD_DEFS)
+# include <stdio.h>
+
+ int main(void)
+ {
+ printf("test skipped\n");
+ return 0;
+ }
+
+#else
+
+#include "run_parallel.h"
+
+#include "test_atomic_include.h"
+
+#ifdef AO_USE_PTHREAD_DEFS
+# define NITERS 100000
+#else
+# define NITERS 10000000
+#endif
+
+void * add1sub1_thr(void * id);
+int add1sub1_test(void);
+void * acqrel_thr(void *id);
+int acqrel_test(void);
+void * test_and_set_thr(void * id);
+int test_and_set_test(void);
+
+#if defined(AO_HAVE_fetch_and_add1) && defined(AO_HAVE_fetch_and_sub1)
+
+/* Shared counter: odd-indexed threads decrement it NITERS times, even */
+/* ones increment it NITERS times; run_parallel spawns an even number  */
+/* of threads, so the final value must be exactly 0.                   */
+AO_t counter = 0;
+
+/* Thread body: id (the thread index) selects increment vs decrement. */
+void * add1sub1_thr(void * id)
+{
+ int me = (int)(AO_PTRDIFF_T)id;
+
+ int i;
+
+ for (i = 0; i < NITERS; ++i)
+ if ((me & 1) != 0) {
+ (void)AO_fetch_and_sub1(&counter);
+ } else {
+ (void)AO_fetch_and_add1(&counter);
+ }
+ return 0;
+}
+
+/* Check function: all increments and decrements must have cancelled. */
+int add1sub1_test(void)
+{
+ return counter == 0;
+}
+
+#endif /* defined(AO_HAVE_fetch_and_add1) && defined(AO_HAVE_fetch_and_sub1) */
+
+#if defined(AO_HAVE_store_release_write) && defined(AO_HAVE_load_acquire_read)
+
+/* Invariant: counter1 >= counter2 */
+AO_t counter1 = 0;
+AO_t counter2 = 0;
+
+/* Thread body: the single odd-indexed thread is the writer (it bumps  */
+/* counter1 first, then publishes the same value to counter2 with a    */
+/* release-write); even-indexed threads are readers that load counter2 */
+/* with acquire-read before counter1 and verify the invariant.  A      */
+/* violation means the release/acquire ordering was not honored.       */
+void * acqrel_thr(void *id)
+{
+ int me = (int)(AO_PTRDIFF_T)id;
+
+ int i;
+
+ for (i = 0; i < NITERS; ++i)
+ if (me & 1)
+ {
+ AO_t my_counter1;
+ if (me != 1)
+ {
+ fprintf(stderr, "acqrel test: too many threads\n");
+ abort();
+ }
+ my_counter1 = AO_load(&counter1);
+ AO_store(&counter1, my_counter1 + 1);
+ AO_store_release_write(&counter2, my_counter1 + 1);
+ }
+ else
+ {
+ AO_t my_counter1a, my_counter2a;
+ AO_t my_counter1b, my_counter2b;
+
+ my_counter2a = AO_load_acquire_read(&counter2);
+ my_counter1a = AO_load(&counter1);
+ /* Redo this, to make sure that the second load of counter1 */
+ /* is not viewed as a common subexpression. */
+ my_counter2b = AO_load_acquire_read(&counter2);
+ my_counter1b = AO_load(&counter1);
+ if (my_counter1a < my_counter2a)
+ {
+ fprintf(stderr, "Saw release store out of order: %lu < %lu\n",
+ (unsigned long)my_counter1a, (unsigned long)my_counter2a);
+ abort();
+ }
+ if (my_counter1b < my_counter2b)
+ {
+ fprintf(stderr,
+ "Saw release store out of order (bad CSE?): %lu < %lu\n",
+ (unsigned long)my_counter1b, (unsigned long)my_counter2b);
+ abort();
+ }
+ }
+
+ return 0;
+}
+
+/* Check function: the single writer performed exactly NITERS updates. */
+int acqrel_test(void)
+{
+ return counter1 == NITERS && counter2 == NITERS;
+}
+
+#endif /* AO_HAVE_store_release_write && AO_HAVE_load_acquire_read */
+
+#if defined(AO_HAVE_test_and_set_acquire)
+
+AO_TS_t lock = AO_TS_INITIALIZER;
+
+/* Protected by lock; must read 0 outside the critical section and 1   */
+/* right after the increment inside it.                                */
+unsigned long locked_counter;
+/* volatile busy-work target so the arithmetic outside the lock is not */
+/* optimized away.                                                     */
+volatile unsigned long junk = 13;
+
+/* Thread body: repeatedly take the spin lock, scramble and restore    */
+/* locked_counter (any interleaving by another thread would leave it   */
+/* != 1 at the checks), release, then spend a bit of time unlocked.    */
+void * test_and_set_thr(void * id)
+{
+ unsigned long i;
+
+ for (i = 0; i < NITERS/10; ++i)
+ {
+ while (AO_test_and_set_acquire(&lock) != AO_TS_CLEAR);
+ ++locked_counter;
+ if (locked_counter != 1)
+ {
+ fprintf(stderr, "Test and set failure 1, counter = %ld, id = %d\n",
+ (long)locked_counter, (int)(AO_PTRDIFF_T)id);
+ abort();
+ }
+ locked_counter *= 2;
+ locked_counter -= 1;
+ locked_counter *= 5;
+ locked_counter -= 4;
+ if (locked_counter != 1)
+ {
+ fprintf(stderr, "Test and set failure 2, counter = %ld, id = %d\n",
+ (long)locked_counter, (int)(AO_PTRDIFF_T)id);
+ abort();
+ }
+ --locked_counter;
+ AO_CLEAR(&lock);
+ /* Spend a bit of time outside the lock. */
+ junk *= 17;
+ junk *= 17;
+ }
+ return 0;
+}
+
+/* Check function: counter must be back to 0 once every thread exits. */
+int test_and_set_test(void)
+{
+ return locked_counter == 0;
+}
+
+#endif /* defined(AO_HAVE_test_and_set_acquire) */
+
+/* Run the single-threaded sanity checks for every barrier variant     */
+/* (generated from test_atomic_include.h), then the multi-threaded     */
+/* tests for whichever primitives this platform provides.              */
+int main(void)
+{
+ test_atomic();
+ test_atomic_acquire();
+ test_atomic_release();
+ test_atomic_read();
+ test_atomic_write();
+ test_atomic_full();
+ test_atomic_release_write();
+ test_atomic_acquire_read();
+# if defined(AO_HAVE_fetch_and_add1) && defined(AO_HAVE_fetch_and_sub1)
+ run_parallel(4, add1sub1_thr, add1sub1_test, "add1/sub1");
+# endif
+# if defined(AO_HAVE_store_release_write) && defined(AO_HAVE_load_acquire_read)
+ run_parallel(3, acqrel_thr, acqrel_test,
+ "store_release_write/load_acquire_read");
+# endif
+# if defined(AO_HAVE_test_and_set_acquire)
+ run_parallel(5, test_and_set_thr, test_and_set_test,
+ "test_and_set");
+# endif
+ return 0;
+}
+
+#endif /* !AO_NO_PTHREADS || !AO_USE_PTHREAD_DEFS */
--- /dev/null
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * This file is covered by the GNU general public license, version 2.
+ * see COPYING for details.
+ */
+
+/* Some basic sanity tests. These do not test the barrier semantics. */
+
+/* Assert that e holds; on failure report file/line (XX is the barrier */
+/* suffix spliced in by sed) and exit(1).  Unlike assert(), this is    */
+/* never compiled out by NDEBUG.                                       */
+/* NOTE(review): the bare if-without-else form can capture a following */
+/* "else"; the do { } while (0) idiom would be safer - left as is      */
+/* since all existing call sites are plain statements.                 */
+#undef TA_assert
+#define TA_assert(e) \
+ if (!(e)) { fprintf(stderr, "Assertion failed %s:%d (barrier: XX)\n", \
+ __FILE__, __LINE__), exit(1); }
+
+/* Report a primitive that is unavailable for this barrier variant. */
+#undef MISSING
+#define MISSING(name) \
+ printf("Missing: %s\n", #name "XX")
+
+void test_atomicXX(void)
+{
+ AO_t x;
+ unsigned char b;
+ unsigned short s;
+ unsigned int zz;
+# if defined(AO_HAVE_test_and_setXX)
+ AO_TS_t z = AO_TS_INITIALIZER;
+# endif
+# if defined(AO_HAVE_double_compare_and_swapXX) \
+ || defined(AO_HAVE_double_loadXX) \
+ || defined(AO_HAVE_double_storeXX)
+ AO_double_t old_w;
+ AO_double_t new_w;
+# endif
+# if defined(AO_HAVE_compare_and_swap_doubleXX) \
+ || defined(AO_HAVE_compare_double_and_swap_doubleXX) \
+ || defined(AO_HAVE_double_compare_and_swapXX)
+ AO_double_t w;
+ w.AO_val1 = 0;
+ w.AO_val2 = 0;
+# endif
+
+# if defined(AO_HAVE_nopXX)
+ AO_nopXX();
+# elif !defined(AO_HAVE_nop) || !defined(AO_HAVE_nop_full) \
+ || !defined(AO_HAVE_nop_read) || !defined(AO_HAVE_nop_write)
+ MISSING(AO_nop);
+# endif
+# if defined(AO_HAVE_storeXX)
+ AO_storeXX(&x, 13);
+ TA_assert (x == 13);
+# else
+# if !defined(AO_HAVE_store) || !defined(AO_HAVE_store_full) \
+ || !defined(AO_HAVE_store_release) \
+ || !defined(AO_HAVE_store_release_write) \
+ || !defined(AO_HAVE_store_write)
+ MISSING(AO_store);
+# endif
+ x = 13;
+# endif
+# if defined(AO_HAVE_loadXX)
+ TA_assert(AO_loadXX(&x) == 13);
+# elif !defined(AO_HAVE_load) || !defined(AO_HAVE_load_acquire) \
+ || !defined(AO_HAVE_load_acquire_read) \
+ || !defined(AO_HAVE_load_dd_acquire_read) \
+ || !defined(AO_HAVE_load_full) || !defined(AO_HAVE_load_read)
+ MISSING(AO_load);
+# endif
+# if defined(AO_HAVE_test_and_setXX)
+ assert(AO_test_and_setXX(&z) == AO_TS_CLEAR);
+ assert(AO_test_and_setXX(&z) == AO_TS_SET);
+ assert(AO_test_and_setXX(&z) == AO_TS_SET);
+ AO_CLEAR(&z);
+# else
+ MISSING(AO_test_and_set);
+# endif
+# if defined(AO_HAVE_fetch_and_addXX)
+ TA_assert(AO_fetch_and_addXX(&x, 42) == 13);
+ TA_assert(AO_fetch_and_addXX(&x, (AO_t)(-42)) == 55);
+# else
+ MISSING(AO_fetch_and_add);
+# endif
+# if defined(AO_HAVE_fetch_and_add1XX)
+ TA_assert(AO_fetch_and_add1XX(&x) == 13);
+# else
+ MISSING(AO_fetch_and_add1);
+ ++x;
+# endif
+# if defined(AO_HAVE_fetch_and_sub1XX)
+ TA_assert(AO_fetch_and_sub1XX(&x) == 14);
+# else
+ MISSING(AO_fetch_and_sub1);
+ --x;
+# endif
+# if defined(AO_HAVE_short_storeXX)
+ AO_short_storeXX(&s, 13);
+# else
+# if !defined(AO_HAVE_short_store) || !defined(AO_HAVE_short_store_full) \
+ || !defined(AO_HAVE_short_store_release) \
+ || !defined(AO_HAVE_short_store_release_write) \
+ || !defined(AO_HAVE_short_store_write)
+ MISSING(AO_short_store);
+# endif
+ s = 13;
+# endif
+# if defined(AO_HAVE_short_loadXX)
+ TA_assert(AO_short_load(&s) == 13);
+# elif !defined(AO_HAVE_short_load) || !defined(AO_HAVE_short_load_acquire) \
+ || !defined(AO_HAVE_short_load_acquire_read) \
+ || !defined(AO_HAVE_short_load_dd_acquire_read) \
+ || !defined(AO_HAVE_short_load_full) \
+ || !defined(AO_HAVE_short_load_read)
+ MISSING(AO_short_load);
+# endif
+# if defined(AO_HAVE_short_fetch_and_addXX)
+ TA_assert(AO_short_fetch_and_addXX(&s, 42) == 13);
+ TA_assert(AO_short_fetch_and_addXX(&s, (unsigned short)-42) == 55);
+# else
+ MISSING(AO_short_fetch_and_add);
+# endif
+# if defined(AO_HAVE_short_fetch_and_add1XX)
+ TA_assert(AO_short_fetch_and_add1XX(&s) == 13);
+# else
+ MISSING(AO_short_fetch_and_add1);
+ ++s;
+# endif
+# if defined(AO_HAVE_short_fetch_and_sub1XX)
+ TA_assert(AO_short_fetch_and_sub1XX(&s) == 14);
+# else
+ MISSING(AO_short_fetch_and_sub1);
+ --s;
+# endif
+# if defined(AO_HAVE_char_storeXX)
+ AO_char_storeXX(&b, 13);
+# else
+# if !defined(AO_HAVE_char_store) || !defined(AO_HAVE_char_store_full) \
+ || !defined(AO_HAVE_char_store_release) \
+ || !defined(AO_HAVE_char_store_release_write) \
+ || !defined(AO_HAVE_char_store_write)
+ MISSING(AO_char_store);
+# endif
+ b = 13;
+# endif
+# if defined(AO_HAVE_char_loadXX)
+ TA_assert(AO_char_load(&b) == 13);
+# elif !defined(AO_HAVE_char_load) || !defined(AO_HAVE_char_load_acquire) \
+ || !defined(AO_HAVE_char_load_acquire_read) \
+ || !defined(AO_HAVE_char_load_dd_acquire_read) \
+ || !defined(AO_HAVE_char_load_full) || !defined(AO_HAVE_char_load_read)
+ MISSING(AO_char_load);
+# endif
+# if defined(AO_HAVE_char_fetch_and_addXX)
+ TA_assert(AO_char_fetch_and_addXX(&b, 42) == 13);
+ TA_assert(AO_char_fetch_and_addXX(&b, (unsigned char)-42) == 55);
+# else
+ MISSING(AO_char_fetch_and_add);
+# endif
+# if defined(AO_HAVE_char_fetch_and_add1XX)
+ TA_assert(AO_char_fetch_and_add1XX(&b) == 13);
+# else
+ MISSING(AO_char_fetch_and_add1);
+ ++b;
+# endif
+# if defined(AO_HAVE_char_fetch_and_sub1XX)
+ TA_assert(AO_char_fetch_and_sub1XX(&b) == 14);
+# else
+ MISSING(AO_char_fetch_and_sub1);
+ --b;
+# endif
+# if defined(AO_HAVE_int_storeXX)
+ AO_int_storeXX(&zz, 13);
+# else
+# if !defined(AO_HAVE_int_store) || !defined(AO_HAVE_int_store_full) \
+ || !defined(AO_HAVE_int_store_release) \
+ || !defined(AO_HAVE_int_store_release_write) \
+ || !defined(AO_HAVE_int_store_write)
+ MISSING(AO_int_store);
+# endif
+ zz = 13;
+# endif
+# if defined(AO_HAVE_int_loadXX)
+ TA_assert(AO_int_load(&zz) == 13);
+# elif !defined(AO_HAVE_int_load) || !defined(AO_HAVE_int_load_acquire) \
+ || !defined(AO_HAVE_int_load_acquire_read) \
+ || !defined(AO_HAVE_int_load_dd_acquire_read) \
+ || !defined(AO_HAVE_int_load_full) || !defined(AO_HAVE_int_load_read)
+ MISSING(AO_int_load);
+# endif
+# if defined(AO_HAVE_int_fetch_and_addXX)
+ TA_assert(AO_int_fetch_and_addXX(&zz, 42) == 13);
+ TA_assert(AO_int_fetch_and_addXX(&zz, (unsigned int)-42) == 55);
+# else
+ MISSING(AO_int_fetch_and_add);
+# endif
+# if defined(AO_HAVE_int_fetch_and_add1XX)
+ TA_assert(AO_int_fetch_and_add1XX(&zz) == 13);
+# else
+ MISSING(AO_int_fetch_and_add1);
+ ++zz;
+# endif
+# if defined(AO_HAVE_int_fetch_and_sub1XX)
+ TA_assert(AO_int_fetch_and_sub1XX(&zz) == 14);
+# else
+ MISSING(AO_int_fetch_and_sub1);
+ --zz;
+# endif
+# if defined(AO_HAVE_compare_and_swapXX)
+ TA_assert(!AO_compare_and_swapXX(&x, 14, 42));
+ TA_assert(x == 13);
+ TA_assert(AO_compare_and_swapXX(&x, 13, 42));
+ TA_assert(x == 42);
+# else
+ MISSING(AO_compare_and_swap);
+ if (x == 13) x = 42;
+# endif
+# if defined(AO_HAVE_orXX)
+ AO_orXX(&x, 66);
+ TA_assert(x == 106);
+# else
+ MISSING(AO_or);
+ x |= 66;
+# endif
+# if defined(AO_HAVE_xorXX)
+ AO_xorXX(&x, 181);
+ TA_assert(x == 223);
+# else
+ MISSING(AO_xor);
+ x ^= 181;
+# endif
+# if defined(AO_HAVE_andXX)
+ AO_andXX(&x, 57);
+ TA_assert(x == 25);
+# else
+ MISSING(AO_and);
+ x &= 57;
+# endif
+# if defined(AO_HAVE_fetch_compare_and_swapXX)
+ TA_assert(AO_fetch_compare_and_swapXX(&x, 14, 117) == 25);
+ TA_assert(x == 25);
+ TA_assert(AO_fetch_compare_and_swapXX(&x, 25, 117) == 25);
+ TA_assert(x == 117);
+# else
+ MISSING(AO_fetch_compare_and_swap);
+ if (x == 25) x = 117;
+# endif
+# if defined(AO_HAVE_double_loadXX)
+ old_w.AO_val1 = 3316;
+ old_w.AO_val2 = 2921;
+ new_w = AO_double_loadXX(&old_w);
+ TA_assert(new_w.AO_val1 == 3316 && new_w.AO_val2 == 2921);
+# elif !defined(AO_HAVE_double_load) \
+ || !defined(AO_HAVE_double_load_acquire) \
+ || !defined(AO_HAVE_double_load_acquire_read) \
+ || !defined(AO_HAVE_double_load_dd_acquire_read) \
+ || !defined(AO_HAVE_double_load_full) \
+ || !defined(AO_HAVE_double_load_read)
+ MISSING(AO_double_load);
+# endif
+# if defined(AO_HAVE_double_storeXX)
+ new_w.AO_val1 = 1375;
+ new_w.AO_val2 = 8243;
+ AO_double_storeXX(&old_w, new_w);
+ TA_assert(old_w.AO_val1 == 1375 && old_w.AO_val2 == 8243);
+ AO_double_storeXX(&old_w, new_w);
+ TA_assert(old_w.AO_val1 == 1375 && old_w.AO_val2 == 8243);
+ new_w.AO_val1 ^= old_w.AO_val1;
+ new_w.AO_val2 ^= old_w.AO_val2;
+ AO_double_storeXX(&old_w, new_w);
+ TA_assert(old_w.AO_val1 == 0 && old_w.AO_val2 == 0);
+# elif !defined(AO_HAVE_double_store) \
+ || !defined(AO_HAVE_double_store_full) \
+ || !defined(AO_HAVE_double_store_release) \
+ || !defined(AO_HAVE_double_store_release_write) \
+ || !defined(AO_HAVE_double_store_write)
+ MISSING(AO_double_store);
+# endif
+# if defined(AO_HAVE_compare_double_and_swap_doubleXX)
+ TA_assert(!AO_compare_double_and_swap_doubleXX(&w, 17, 42, 12, 13));
+ TA_assert(w.AO_val1 == 0 && w.AO_val2 == 0);
+ TA_assert(AO_compare_double_and_swap_doubleXX(&w, 0, 0, 12, 13));
+ TA_assert(w.AO_val1 == 12 && w.AO_val2 == 13);
+ TA_assert(!AO_compare_double_and_swap_doubleXX(&w, 12, 14, 64, 33));
+ TA_assert(w.AO_val1 == 12 && w.AO_val2 == 13);
+ TA_assert(!AO_compare_double_and_swap_doubleXX(&w, 11, 13, 85, 82));
+ TA_assert(w.AO_val1 == 12 && w.AO_val2 == 13);
+ TA_assert(!AO_compare_double_and_swap_doubleXX(&w, 13, 12, 17, 42));
+ TA_assert(w.AO_val1 == 12 && w.AO_val2 == 13);
+ TA_assert(AO_compare_double_and_swap_doubleXX(&w, 12, 13, 17, 42));
+ TA_assert(w.AO_val1 == 17 && w.AO_val2 == 42);
+ TA_assert(AO_compare_double_and_swap_doubleXX(&w, 17, 42, 0, 0));
+ TA_assert(w.AO_val1 == 0 && w.AO_val2 == 0);
+# else
+ MISSING(AO_compare_double_and_swap_double);
+# endif
+# if defined(AO_HAVE_compare_and_swap_doubleXX)
+ TA_assert(!AO_compare_and_swap_doubleXX(&w, 17, 12, 13));
+ TA_assert(w.AO_val1 == 0 && w.AO_val2 == 0);
+ TA_assert(AO_compare_and_swap_doubleXX(&w, 0, 12, 13));
+ TA_assert(w.AO_val1 == 12 && w.AO_val2 == 13);
+ TA_assert(!AO_compare_and_swap_doubleXX(&w, 13, 12, 33));
+ TA_assert(w.AO_val1 == 12 && w.AO_val2 == 13);
+ TA_assert(!AO_compare_and_swap_doubleXX(&w, 1213, 48, 86));
+ TA_assert(w.AO_val1 == 12 && w.AO_val2 == 13);
+ TA_assert(AO_compare_and_swap_doubleXX(&w, 12, 17, 42));
+ TA_assert(w.AO_val1 == 17 && w.AO_val2 == 42);
+ TA_assert(AO_compare_and_swap_doubleXX(&w, 17, 0, 0));
+ TA_assert(w.AO_val1 == 0 && w.AO_val2 == 0);
+# else
+ MISSING(AO_compare_and_swap_double);
+# endif
+# if defined(AO_HAVE_double_compare_and_swapXX)
+ old_w.AO_val1 = 4116;
+ old_w.AO_val2 = 2121;
+ new_w.AO_val1 = 8537;
+ new_w.AO_val2 = 6410;
+ TA_assert(!AO_double_compare_and_swapXX(&w, old_w, new_w));
+ TA_assert(w.AO_val1 == 0 && w.AO_val2 == 0);
+ TA_assert(AO_double_compare_and_swapXX(&w, w, new_w));
+ TA_assert(w.AO_val1 == 8537 && w.AO_val2 == 6410);
+ old_w.AO_val1 = new_w.AO_val1;
+ old_w.AO_val2 = 29;
+ new_w.AO_val1 = 820;
+ new_w.AO_val2 = 5917;
+ TA_assert(!AO_double_compare_and_swapXX(&w, old_w, new_w));
+ TA_assert(w.AO_val1 == 8537 && w.AO_val2 == 6410);
+ old_w.AO_val1 = 11;
+ old_w.AO_val2 = 6410;
+ new_w.AO_val1 = 3552;
+ new_w.AO_val2 = 1746;
+ TA_assert(!AO_double_compare_and_swapXX(&w, old_w, new_w));
+ TA_assert(w.AO_val1 == 8537 && w.AO_val2 == 6410);
+ old_w.AO_val1 = old_w.AO_val2;
+ old_w.AO_val2 = 8537;
+ new_w.AO_val1 = 4116;
+ new_w.AO_val2 = 2121;
+ TA_assert(!AO_double_compare_and_swapXX(&w, old_w, new_w));
+ TA_assert(w.AO_val1 == 8537 && w.AO_val2 == 6410);
+ old_w.AO_val1 = old_w.AO_val2;
+ old_w.AO_val2 = 6410;
+ new_w.AO_val1 = 1;
+ TA_assert(AO_double_compare_and_swapXX(&w, old_w, new_w));
+ TA_assert(w.AO_val1 == 1 && w.AO_val2 == 2121);
+ old_w.AO_val1 = new_w.AO_val1;
+ old_w.AO_val2 = w.AO_val2;
+ new_w.AO_val1--;
+ new_w.AO_val2 = 0;
+ TA_assert(AO_double_compare_and_swapXX(&w, old_w, new_w));
+ TA_assert(w.AO_val1 == 0 && w.AO_val2 == 0);
+# else
+ MISSING(AO_double_compare_and_swap);
+# endif
+}
--- /dev/null
+/*
+ * Copyright (c) 2005 Hewlett-Packard Development Company, L.P.
+ *
+ * This file may be redistributed and/or modified under the
+ * terms of the GNU General Public License as published by the Free Software
+ * Foundation; either version 2, or (at your option) any later version.
+ *
+ * It is distributed in the hope that it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License in the
+ * file COPYING for more details.
+ */
+
+#if defined(HAVE_CONFIG_H)
+# include "config.h"
+#endif
+
+#include "run_parallel.h"
+
+#include <stdlib.h>
+#include <stdio.h>
+#include "atomic_ops_malloc.h"
+
+#ifndef MAX_NTHREADS
+# define MAX_NTHREADS 100
+#endif
+
+#ifndef DEFAULT_NTHREADS
+# ifdef HAVE_MMAP
+# define DEFAULT_NTHREADS 10
+# else
+# define DEFAULT_NTHREADS 3
+# endif
+#endif
+
+#ifndef N_REVERSALS
+# ifdef AO_USE_PTHREAD_DEFS
+# define N_REVERSALS 4
+# else
+# define N_REVERSALS 1000 /* must be even */
+# endif
+#endif
+
+#ifndef LIST_LENGTH
+# ifdef HAVE_MMAP
+# define LIST_LENGTH 1000
+# else
+# define LIST_LENGTH 100
+# endif
+#endif
+
+#ifndef LARGE_OBJ_SIZE
+# ifdef HAVE_MMAP
+# define LARGE_OBJ_SIZE 200000
+# else
+# define LARGE_OBJ_SIZE 20000
+# endif
+#endif
+
+#ifdef USE_STANDARD_MALLOC
+# define AO_malloc(n) malloc(n)
+# define AO_free(p) free(p)
+# define AO_malloc_enable_mmap()
+#endif
+
+/* Singly-linked list node used by the malloc test. */
+typedef struct list_node {
+ struct list_node *next;
+ int data;
+} ln;
+
+/* Allocate a new node holding d with tail as its successor.  Each     */
+/* call appends a growing amount (0..100 ints, filled with 42) of      */
+/* extra payload to vary the allocation sizes exercised; exits with    */
+/* code 2 when AO_malloc fails.                                        */
+/* NOTE(review): "extra" is a plain static read-modify-write shared by */
+/* all threads - benign here (it only perturbs allocation sizes), but  */
+/* a data race in the strict sense.                                    */
+ln *cons(int d, ln *tail)
+{
+ static size_t extra = 0;
+ size_t my_extra = extra;
+ ln *result;
+ int * extras;
+ unsigned i;
+
+ if (my_extra > 100)
+ extra = my_extra = 0;
+ else
+ ++extra;
+ result = AO_malloc(sizeof(ln) + sizeof(int)*my_extra);
+ if (result == 0)
+ {
+ fprintf(stderr, "Out of memory\n");
+ /* Normal for more than about 10 threads without mmap? */
+ exit(2);
+ }
+
+ result -> data = d;
+ result -> next = tail;
+ extras = (int *)(result+1);
+ for (i = 0; i < my_extra; ++i) extras[i] = 42;
+ return result;
+}
+
+/* Print the data fields of list l, comma-separated, on one line. */
+void print_list(ln *l)
+{
+ ln *p;
+
+ for (p = l; p != 0; p = p -> next)
+ {
+ printf("%d, ", p -> data);
+ }
+ printf("\n");
+}
+
+/* Check that l contains numbers from m to n inclusive in ascending order */
+/* Aborts on any mismatch, missing value, or trailing node.               */
+/* NOTE(review): the "unexpected number" message prints the loop counter  */
+/* i rather than p->data, the value actually found - presumably it        */
+/* should print p->data; confirm before changing the message.             */
+void check_list(ln *l, int m, int n)
+{
+ ln *p;
+ int i;
+
+ for (p = l, i = m; p != 0 && i <= n; p = p -> next, ++i)
+ {
+ if (i != p -> data)
+ {
+ fprintf(stderr, "Found %d, expected %d\n", p -> data, i);
+ abort();
+ }
+ }
+ if (i <= n)
+ {
+ fprintf(stderr, "Number not found: %d\n", i);
+ abort();
+ }
+ if (p != 0)
+ {
+ fprintf(stderr, "Found unexpected number: %d\n", i);
+ abort();
+ }
+}
+
+/* Create a list of integers from m to n */
+/* NOTE(review): recursion depth equals the list length (LIST_LENGTH), */
+/* so very long lists would risk stack overflow - fine at current      */
+/* sizes.                                                              */
+ln *
+make_list(int m, int n)
+{
+ if (m > n) return 0;
+ return cons(m, make_list(m+1, n));
+}
+
+/* Reverse list x, and concatenate it to y, deallocating no longer needed */
+/* nodes in x. */
+/* NOTE(review): recursion depth equals the length of x; each step        */
+/* allocates the new node before freeing the old one.                     */
+ln *
+reverse(ln *x, ln *y)
+{
+ ln * result;
+
+ if (x == 0) return y;
+ result = reverse(x -> next, cons(x -> data, y));
+ AO_free(x);
+ return result;
+}
+
+/* Trivial check function for run_parallel: always reports success. */
+int dummy_test(void) { return 1; }
+
+/* Thread body: allocate two large objects and verify neither smashes  */
+/* the other, then repeatedly reverse a freshly built list (an even    */
+/* number of times, so it ends up in original order) and check it.     */
+/* Failure of the first large allocation is tolerated when mmap is     */
+/* unavailable.                                                        */
+void * run_one_test(void * arg) {
+ ln * x = make_list(1, LIST_LENGTH);
+ int i;
+ char *p = AO_malloc(LARGE_OBJ_SIZE);
+ char *q;
+
+ if (0 == p) {
+# ifdef HAVE_MMAP
+ fprintf(stderr, "AO_malloc(%d) failed\n", LARGE_OBJ_SIZE);
+ abort();
+# else
+ fprintf(stderr, "AO_malloc(%d) failed: This is normal without mmap\n",
+ LARGE_OBJ_SIZE);
+# endif
+ } else {
+ p[0] = p[LARGE_OBJ_SIZE/2] = p[LARGE_OBJ_SIZE-1] = 'a';
+ q = AO_malloc(LARGE_OBJ_SIZE);
+ if (q == 0)
+ {
+ fprintf(stderr, "Out of memory\n");
+ /* Normal for more than about 10 threads without mmap? */
+ exit(2);
+ }
+ q[0] = q[LARGE_OBJ_SIZE/2] = q[LARGE_OBJ_SIZE-1] = 'b';
+ if (p[0] != 'a' || p[LARGE_OBJ_SIZE/2] != 'a'
+ || p[LARGE_OBJ_SIZE-1] != 'a') {
+ fprintf(stderr, "First large allocation smashed\n");
+ abort();
+ }
+ AO_free(p);
+ if (q[0] != 'b' || q[LARGE_OBJ_SIZE/2] != 'b'
+ || q[LARGE_OBJ_SIZE-1] != 'b') {
+ fprintf(stderr, "Second large allocation smashed\n");
+ abort();
+ }
+ AO_free(q);
+ }
+# ifdef DEBUG_RUN_ONE_TEST
+ x = reverse(x, 0);
+ print_list(x);
+ x = reverse(x, 0);
+ print_list(x);
+# endif
+ for (i = 0; i < N_REVERSALS; ++i) {
+ x = reverse(x, 0);
+ }
+ check_list(x, 1, LIST_LENGTH);
+ return arg; /* use arg to suppress compiler warning */
+}
+
+/* Optional single argument: the number of threads (1..MAX_NTHREADS);  */
+/* defaults to DEFAULT_NTHREADS.  Enables mmap for large allocations,  */
+/* then runs run_one_test in parallel.                                 */
+int main(int argc, char **argv) {
+ int nthreads;
+
+ if (1 == argc) {
+ nthreads = DEFAULT_NTHREADS;
+ } else if (2 == argc) {
+ nthreads = atoi(argv[1]);
+ if (nthreads < 1 || nthreads > MAX_NTHREADS) {
+ fprintf(stderr, "Invalid # of threads argument\n");
+ exit(1);
+ }
+ } else {
+ fprintf(stderr, "Usage: %s [# of threads]\n", argv[0]);
+ exit(1);
+ }
+ printf("Performing %d reversals of %d element lists in %d threads\n",
+ N_REVERSALS, LIST_LENGTH, nthreads);
+ AO_malloc_enable_mmap();
+ run_parallel(nthreads, run_one_test, dummy_test, "AO_malloc/AO_free");
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright (c) 2005 Hewlett-Packard Development Company, L.P.
+ *
+ * This file may be redistributed and/or modified under the
+ * terms of the GNU General Public License as published by the Free Software
+ * Foundation; either version 2, or (at your option) any later version.
+ *
+ * It is distributed in the hope that it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License in the
+ * file COPYING for more details.
+ */
+
+/* Pick up the build configuration (AO_USE_* feature macros) if any.    */
+#if defined(HAVE_CONFIG_H)
+# include "config.h"
+#endif
+
+#include <stdio.h>
+
+#if defined(__vxworks)
+
+  /* No thread support is wired up for VxWorks here - skip the test.    */
+  int main(void)
+  {
+    printf("test skipped\n");
+    return 0;
+  }
+
+#else
+
+/* Use native Win32 threads on Windows targets unless the build         */
+/* explicitly requests pthreads-w32 (AO_USE_WIN32_PTHREADS).            */
+#if ((defined(_WIN32) && !defined(__CYGWIN32__) && !defined(__CYGWIN__)) \
+     || defined(_MSC_VER) || defined(_WIN32_WINCE)) \
+    && !defined(AO_USE_WIN32_PTHREADS)
+# define USE_WINTHREADS
+#endif
+
+#ifdef USE_WINTHREADS
+# include <windows.h>
+#else
+# include <pthread.h>
+#endif
+
+#include <stdlib.h>
+
+#include "atomic_ops_stack.h" /* includes atomic_ops.h as well */
+
+#if (defined(_WIN32_WCE) || defined(__MINGW32CE__)) && !defined(abort)
+# define abort() _exit(-1) /* there is no abort() in WinCE */
+#endif
+
+/* Upper bound on the number of test threads (sizes the marks array     */
+/* and the per-thread element buffer below).                            */
+#ifndef MAX_NTHREADS
+# define MAX_NTHREADS 100
+#endif
+
+/* get_msecs(): wall-clock time in milliseconds, used to time each run; */
+/* stubbed out to 0 when NO_TIMES is defined.                           */
+#ifdef NO_TIMES
+# define get_msecs() 0
+#elif defined(USE_WINTHREADS) || defined(AO_USE_WIN32_PTHREADS)
+# include <sys/timeb.h>
+  long long get_msecs(void)
+  {
+    struct timeb tb;
+
+    ftime(&tb);
+    return (long long)tb.time * 1000 + tb.millitm;
+  }
+#else /* Unix */
+# include <time.h>
+# include <sys/time.h>
+  /* Need 64-bit long long support */
+  long long get_msecs(void)
+  {
+    struct timeval tv;
+
+    gettimeofday(&tv, 0);
+    return (long long)tv.tv_sec * 1000 + tv.tv_usec/1000;
+  }
+#endif /* !NO_TIMES */
+
+/* A list node stored on the lock-free stack.  The AO_t link word comes */
+/* first, as the element address is passed to AO_stack_push directly.   */
+typedef struct le {
+  AO_t next;  /* link word managed by the AO_stack code */
+  int data;   /* payload: the element's 1-based index (see add_elements) */
+} list_element;
+
+/* The shared lock-free stack all test threads push to and pop from.    */
+AO_stack_t the_list = AO_STACK_INITIALIZER;
+
+/* Populate the_list with n elements carrying data values 1..n, pushed  */
+/* in ascending order.  Exits with code 2 if an allocation fails.       */
+void add_elements(int n)
+{
+  int i;
+
+  for (i = 1; i <= n; ++i)
+  {
+    list_element * le = malloc(sizeof(list_element));
+
+    if (le == 0)
+    {
+      fprintf(stderr, "Out of memory\n");
+      exit(2);
+    }
+    le -> data = i;
+    AO_stack_push(&the_list, (AO_t *)le);
+  }
+}
+
+/* Dump the data value of every element currently on the_list, one per  */
+/* line, in stack (top-to-bottom) order.                                */
+void print_list(void)
+{
+  list_element *elem = (list_element *)AO_REAL_HEAD_PTR(the_list);
+
+  while (elem != 0)
+  {
+    printf("%d\n", elem -> data);
+    elem = (list_element *)AO_REAL_NEXT_PTR(elem -> next);
+  }
+}
+
+/* One slot per possible element value (1..max list length); slot v is  */
+/* set when element v has been seen during a check_list traversal.      */
+static char marks[MAX_NTHREADS * (MAX_NTHREADS + 1) / 2 + 1];
+
+/* Verify that the_list contains exactly the elements 1..n, each once;  */
+/* abort on any out-of-range, duplicate or missing element.             */
+void check_list(int n)
+{
+  list_element *elem;
+  int value;
+
+  for (value = 1; value <= n; ++value)
+    marks[value] = 0;
+
+  for (elem = (list_element *)AO_REAL_HEAD_PTR(the_list);
+       elem != 0;
+       elem = (list_element *)AO_REAL_NEXT_PTR(elem -> next))
+  {
+    value = elem -> data;
+    if (value > n || value <= 0)
+    {
+      fprintf(stderr, "Found erroneous list element %d\n", value);
+      abort();
+    }
+    if (marks[value] != 0)
+    {
+      fprintf(stderr, "Found duplicate list element %d\n", value);
+      abort();
+    }
+    marks[value] = 1;
+  }
+
+  for (value = 1; value <= n; ++value)
+    if (marks[value] != 1)
+    {
+      fprintf(stderr, "Missing list element %d\n", value);
+      abort();
+    }
+}
+
+/* Running count of push/pop ops claimed by all threads so far; each    */
+/* thread reserves a batch via fetch_and_add (below) until LIMIT.       */
+volatile AO_t ops_performed = 0;
+
+#ifndef LIMIT
+  /* Total number of push/pop ops in all threads per test. */
+# ifdef AO_USE_PTHREAD_DEFS
+# define LIMIT 20000
+# else
+# define LIMIT 1000000
+# endif
+#endif
+
+#ifdef AO_HAVE_fetch_and_add
+# define fetch_and_add(addr, val) AO_fetch_and_add(addr, val)
+#else
+  /* Fake it (non-atomic load + store). This is really quite */
+  /* unacceptable for timing purposes. But as a correctness */
+  /* test, it should be OK. */
+  AO_INLINE AO_t fetch_and_add(volatile AO_t * addr, AO_t val)
+  {
+    AO_t result = AO_load(addr);
+    AO_store(addr, result + val);
+    return result;
+  }
+#endif
+
+/* Body of each test thread.  arg is the thread index; thread index     */
+/* repeatedly pops index+1 elements off the shared stack and pushes     */
+/* them all back, until the shared op budget (LIMIT) is consumed.       */
+/* Since thread i holds at most i+1 elements at a time and the list is  */
+/* built with nthreads*(nthreads+1)/2 elements (see main), a pop is     */
+/* never expected to find the stack empty; an empty pop indicates a     */
+/* lost element and aborts the test.                                    */
+#ifdef USE_WINTHREADS
+  DWORD WINAPI run_one_test(LPVOID arg)
+#else
+  void * run_one_test(void * arg)
+#endif
+{
+  list_element * t[MAX_NTHREADS + 1]; /* elements currently held locally */
+  int index = (int)(size_t)arg;
+  int i;
+# ifdef VERBOSE
+    int j = 0; /* ops performed by this thread (reporting only) */
+
+    printf("starting thread %d\n", index);
+# endif
+  /* Reserve index+1 ops from the shared budget; stop once exhausted.   */
+  while (fetch_and_add(&ops_performed, index + 1) + index + 1 < LIMIT)
+  {
+    for (i = 0; i < index + 1; ++i)
+    {
+      t[i] = (list_element *)AO_stack_pop(&the_list);
+      if (0 == t[i])
+      {
+        /* Unexpected empty stack: an element was lost.                 */
+        fprintf(stderr, "FAILED\n");
+        abort();
+      }
+    }
+    for (i = 0; i < index + 1; ++i)
+    {
+      AO_stack_push(&the_list, (AO_t *)t[i]);
+    }
+# ifdef VERBOSE
+      j += index + 1;
+# endif
+  }
+# ifdef VERBOSE
+    printf("finished thread %d: %d total ops\n", index, j);
+# endif
+  return 0;
+}
+
+/* Number of repetitions of the whole thread-count sweep.               */
+#ifndef N_EXPERIMENTS
+# define N_EXPERIMENTS 1
+#endif
+
+/* times[n][e]: elapsed milliseconds of the run with n threads in       */
+/* experiment e; filled in by main and reported in the final summary.   */
+unsigned long times[MAX_NTHREADS + 1][N_EXPERIMENTS];
+
+/* Entry point: parse the optional maximum-thread-count argument, then  */
+/* for each experiment and each thread count up to that maximum, build  */
+/* a fresh list, run the concurrent push/pop test, validate and free    */
+/* the list, and finally print the (averaged) per-thread-count timing.  */
+int main(int argc, char **argv)
+{
+  int nthreads;
+  int max_nthreads;
+  int exper_n;
+
+  if (1 == argc)
+    max_nthreads = 4;
+  else if (2 == argc)
+  {
+    max_nthreads = atoi(argv[1]);
+    if (max_nthreads < 1 || max_nthreads > MAX_NTHREADS)
+    {
+      fprintf(stderr, "Invalid max # of threads argument\n");
+      exit(1);
+    }
+  }
+  else
+  {
+    fprintf(stderr, "Usage: %s [max # of threads]\n", argv[0]);
+    exit(1);
+  }
+  for (exper_n = 0; exper_n < N_EXPERIMENTS; ++exper_n)
+    for (nthreads = 1; nthreads <= max_nthreads; ++nthreads)
+    {
+      int i;
+#     ifdef USE_WINTHREADS
+        DWORD thread_id;
+        HANDLE thread[MAX_NTHREADS];
+#     else
+        pthread_t thread[MAX_NTHREADS];
+#     endif
+      /* Just enough elements for every thread to hold its batch:       */
+      /* thread i pops up to i+1 elements, and sum(i+1) = n*(n+1)/2.    */
+      int list_length = nthreads*(nthreads+1)/2;
+      long long start_time;
+      list_element * le;
+
+#     ifdef VERBOSE
+        printf("Before add_elements: exper_n=%d, nthreads=%d,"
+               " max_nthreads=%d, list_length=%d\n",
+               exper_n, nthreads, max_nthreads, list_length);
+#     endif
+      add_elements(list_length);
+#     ifdef VERBOSE
+        printf("Initial list (nthreads = %d):\n", nthreads);
+        print_list();
+#     endif
+      ops_performed = 0;
+      start_time = get_msecs();
+      /* Start threads 1..nthreads-1; the main thread itself runs the   */
+      /* test as thread 0.  This allows gprof profiling to work, for    */
+      /* example.                                                       */
+      for (i = 1; i < nthreads; ++i) {
+        int code;
+
+#       ifdef USE_WINTHREADS
+          thread[i] = CreateThread(NULL, 0, run_one_test, (LPVOID)(size_t)i,
+                                   0, &thread_id);
+          code = thread[i] != NULL ? 0 : (int)GetLastError();
+#       else
+          code = pthread_create(&thread[i], 0, run_one_test,
+                                (void *)(size_t)i);
+#       endif
+        if (code != 0) {
+          fprintf(stderr, "Thread creation failed %u\n", (unsigned)code);
+          exit(3);
+        }
+      }
+      run_one_test(0);
+      for (i = 1; i < nthreads; ++i) {
+        int code;
+
+#       ifdef USE_WINTHREADS
+          code = WaitForSingleObject(thread[i], INFINITE) == WAIT_OBJECT_0 ?
+                    0 : (int)GetLastError();
+#       else
+          code = pthread_join(thread[i], 0);
+#       endif
+        if (code != 0) {
+          fprintf(stderr, "Thread join failed %u\n", (unsigned)code);
+          abort();
+        }
+      }
+      times[nthreads][exper_n] = (unsigned long)(get_msecs() - start_time);
+#     ifdef VERBOSE
+        /* Report the recorded elapsed time instead of re-reading the   */
+        /* clock, so the value printed here matches the final summary.  */
+        printf("%d %lu\n", nthreads, times[nthreads][exper_n]);
+        printf("final list (should be reordered initial list):\n");
+        print_list();
+#     endif
+      check_list(list_length);
+      /* Drain and free the list so the next iteration starts empty.    */
+      while ((le = (list_element *)AO_stack_pop(&the_list)) != 0)
+        free(le);
+    }
+  for (nthreads = 1; nthreads <= max_nthreads; ++nthreads)
+  {
+#   ifndef NO_TIMES
+      unsigned long sum = 0;
+#   endif
+
+    printf("About %d pushes + %d pops in %d threads:",
+           LIMIT, LIMIT, nthreads);
+#   ifndef NO_TIMES
+      for (exper_n = 0; exper_n < N_EXPERIMENTS; ++exper_n) {
+#       if defined(VERBOSE)
+          printf(" [%lu]", times[nthreads][exper_n]);
+#       endif
+        sum += times[nthreads][exper_n];
+      }
+      /* Round-to-nearest average over the experiments.                 */
+      printf(" %lu msecs\n", (sum + N_EXPERIMENTS/2)/N_EXPERIMENTS);
+#   else
+      printf(" completed\n");
+#   endif
+  }
+  return 0;
+}
+
+#endif