// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// -----------------------------------------------------------------------------
// File: optimization.h
// -----------------------------------------------------------------------------
//
// This header file defines portable macros for performance optimization.

#ifndef ABSL_BASE_OPTIMIZATION_H_
#define ABSL_BASE_OPTIMIZATION_H_

// <assert.h> is required because ABSL_INTERNAL_ASSUME (below) expands to
// assert() in !NDEBUG builds; without this include, users of that macro who
// do not include <assert.h> themselves would fail to compile.
#include <assert.h>

#include "absl/base/config.h"

// ABSL_BLOCK_TAIL_CALL_OPTIMIZATION
//
// Instructs the compiler to avoid optimizing tail-call recursion. Use of this
// macro is useful when you wish to preserve the existing function order within
// a stack trace for logging, debugging, or profiling purposes.
//
// Example:
//
//   int f() {
//     int result = g();
//     ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
//     return result;
//   }
#if defined(__pnacl__)
// PNaCl: a volatile conditional that the optimizer cannot prove away keeps
// the call site from being turned into a tail call.
#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() if (volatile int x = 0) { (void)x; }
#elif defined(__clang__) || defined(__GNUC__)
// Clang and GCC will not tail call given inline volatile assembly.
#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __asm__ __volatile__("")
#elif defined(_MSC_VER)
#include <intrin.h>
// The __nop() intrinsic blocks the optimization.
#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __nop()
#else
// Fallback for unknown compilers: same volatile-conditional trick as PNaCl.
#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() if (volatile int x = 0) { (void)x; }
#endif
// ABSL_CACHELINE_SIZE
//
// Explicitly defines the size of the L1 cache for purposes of alignment.
// Setting the cacheline size allows you to specify that certain objects be
// aligned on a cacheline boundary with `ABSL_CACHELINE_ALIGNED` declarations.
// (See below.)
//
// NOTE: this macro should be replaced with the following C++17 features, when
// those are generally available:
//
//   * `std::hardware_constructive_interference_size`
//   * `std::hardware_destructive_interference_size`
//
// See http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0154r1.html
// for more information.
#if defined(__GNUC__)
// Cache line alignment
#if defined(__i386__) || defined(__x86_64__)
#define ABSL_CACHELINE_SIZE 64
#elif defined(__powerpc64__)
#define ABSL_CACHELINE_SIZE 128
#elif defined(__aarch64__)
// We would need to read special register ctr_el0 to find out L1 dcache size.
// This value is a good estimate based on a real aarch64 machine.
#define ABSL_CACHELINE_SIZE 64
#elif defined(__arm__)
// Cache line sizes for ARM: These values are not strictly correct since
// cache line sizes depend on implementations, not architectures. There are
// even implementations with cache line sizes configurable at boot time.
#if defined(__ARM_ARCH_5T__)
#define ABSL_CACHELINE_SIZE 32
#elif defined(__ARM_ARCH_7A__)
#define ABSL_CACHELINE_SIZE 64
#endif
#endif

#ifndef ABSL_CACHELINE_SIZE
// A reasonable default guess. Note that overestimates tend to waste more
// space, while underestimates tend to waste more time.
#define ABSL_CACHELINE_SIZE 64
#endif

// ABSL_CACHELINE_ALIGNED
//
// Indicates that the declared object be cache aligned using
// `ABSL_CACHELINE_SIZE` (see above). Cacheline aligning objects allows you to
// load a set of related objects in the L1 cache for performance improvements.
// Cacheline aligning objects properly allows constructive memory sharing and
// prevents destructive (or "false") memory sharing.
//
// NOTE: this macro should be replaced with usage of `alignas()` using
// `std::hardware_constructive_interference_size` and/or
// `std::hardware_destructive_interference_size` when available within C++17.
//
// See http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0154r1.html
// for more information.
//
// On some compilers, `ABSL_CACHELINE_ALIGNED` expands to an `__attribute__`
// or `__declspec` attribute. For compilers where this is not known to work,
// the macro expands to nothing.
//
// No further guarantees are made here. The result of applying the macro
// to variables and types is always implementation-defined.
//
// WARNING: It is easy to use this attribute incorrectly, even to the point
// of causing bugs that are difficult to diagnose, crash, etc. It does not
// of itself guarantee that objects are aligned to a cache line.
//
// NOTE: Some compilers are picky about the locations of annotations such as
// this attribute, so prefer to put it at the beginning of your declaration.
// For example,
//
//   ABSL_CACHELINE_ALIGNED static Foo* foo = ...
//
//   class ABSL_CACHELINE_ALIGNED Bar { ...
//
// Recommendations:
//
// 1) Consult compiler documentation; this comment is not kept in sync as
//    toolchains evolve.
// 2) Verify your use has the intended effect. This often requires inspecting
//    the generated machine code.
// 3) Prefer applying this attribute to individual variables. Avoid
//    applying it to types. This tends to localize the effect.
#define ABSL_CACHELINE_ALIGNED __attribute__((aligned(ABSL_CACHELINE_SIZE)))
#elif defined(_MSC_VER)
#define ABSL_CACHELINE_SIZE 64
#define ABSL_CACHELINE_ALIGNED __declspec(align(ABSL_CACHELINE_SIZE))
#else
#define ABSL_CACHELINE_SIZE 64
#define ABSL_CACHELINE_ALIGNED
#endif
// ABSL_PREDICT_TRUE, ABSL_PREDICT_FALSE
//
// Enables the compiler to prioritize compilation using static analysis for
// likely paths within a boolean branch.
//
// Example:
//
//   if (ABSL_PREDICT_TRUE(expression)) {
//     return result;  // Faster if more likely
//   } else {
//     return 0;
//   }
//
// Compilers can use the information that a certain branch is not likely to be
// taken (for instance, a CHECK failure) to optimize for the common case in
// the absence of better information (ie. compiling gcc with `-fprofile-arcs`).
//
// Recommendation: Modern CPUs dynamically predict branch execution paths,
// typically with accuracy greater than 97%. As a result, annotating every
// branch in a codebase is likely counterproductive; however, annotating
// specific branches that are both hot and consistently mispredicted is likely
// to yield performance improvements.
#if ABSL_HAVE_BUILTIN(__builtin_expect) || \
    (defined(__GNUC__) && !defined(__clang__))
#define ABSL_PREDICT_FALSE(x) (__builtin_expect(x, 0))
// "false || (x)" coerces the expression to bool; it produces better code on
// some platforms/optimization levels than the more common "!!(x)".
#define ABSL_PREDICT_TRUE(x) (__builtin_expect(false || (x), true))
#else
#define ABSL_PREDICT_FALSE(x) (x)
#define ABSL_PREDICT_TRUE(x) (x)
#endif
// ABSL_INTERNAL_ASSUME(cond)
//
// Informs the compiler that a condition is always true and that it can assume
// it to be true for optimization purposes. The call has undefined behavior if
// the condition is false.
// In !NDEBUG mode, the condition is checked with an assert().
// NOTE: The expression must not have side effects, as it will only be
// evaluated in some compilation modes and not others.
//
// Example:
//
//   int x = ...;
//   ABSL_INTERNAL_ASSUME(x >= 0);
//   // The compiler can optimize the division to a simple right shift using
//   // the assumption specified above.
//   int y = x / 16;
//
#if !defined(NDEBUG)
#define ABSL_INTERNAL_ASSUME(cond) assert(cond)
#elif ABSL_HAVE_BUILTIN(__builtin_assume)
#define ABSL_INTERNAL_ASSUME(cond) __builtin_assume(cond)
#elif defined(__GNUC__) || ABSL_HAVE_BUILTIN(__builtin_unreachable)
#define ABSL_INTERNAL_ASSUME(cond)        \
  do {                                    \
    if (!(cond)) __builtin_unreachable(); \
  } while (0)
#elif defined(_MSC_VER)
#define ABSL_INTERNAL_ASSUME(cond) __assume(cond)
#else
// No known way to communicate the assumption; still reject expressions that
// do not compile, without ever evaluating them.
#define ABSL_INTERNAL_ASSUME(cond)      \
  do {                                  \
    static_cast<void>(false && (cond)); \
  } while (0)
#endif
// ABSL_INTERNAL_UNIQUE_SMALL_NAME()
//
// Forces a small, unique name onto file-level static symbols such as static
// local variables or static functions. This is intended to be used in macro
// definitions to optimize the cost of generated code. Do NOT use it on
// symbols exported from a translation unit since it may cause a link-time
// conflict.
//
// Example:
//
//   #define MY_MACRO(txt)
//   namespace {
//   char VeryVeryLongVarName[] ABSL_INTERNAL_UNIQUE_SMALL_NAME() = txt;
//   const char* VeryVeryLongFuncName() ABSL_INTERNAL_UNIQUE_SMALL_NAME();
//   const char* VeryVeryLongFuncName() { return txt; }
//   }
//

#if defined(__GNUC__)
// Two-level expansion so that __COUNTER__ is expanded before stringization,
// yielding a distinct assembler name (".absl.<N>") at each use.
#define ABSL_INTERNAL_UNIQUE_SMALL_NAME2(x) #x
#define ABSL_INTERNAL_UNIQUE_SMALL_NAME1(x) ABSL_INTERNAL_UNIQUE_SMALL_NAME2(x)
#define ABSL_INTERNAL_UNIQUE_SMALL_NAME() \
  asm(ABSL_INTERNAL_UNIQUE_SMALL_NAME1(.absl.__COUNTER__))
#else
#define ABSL_INTERNAL_UNIQUE_SMALL_NAME()
#endif
#endif // ABSL_BASE_OPTIMIZATION_H_
|