Bug Summary

File: out/../deps/v8/src/libplatform/default-job.cc
Warning: line 80, column 62
The result of the left shift is undefined due to shifting by '32', which is greater or equal to the width of type 'uint32_t'

Reviewer analysis: On the reported path the analyzer assumes every bit of |assigned_task_ids| is set, so `~assigned_task_ids` is 0 and `CountTrailingZeros32()` returns 32 (bits.h line 118 returns `bits` for a zero input; see path steps 6-12). The shift `uint32_t(1) << task_id` at default-job.cc line 80 then shifts a `uint32_t` by its full width, which is undefined behavior per [expr.shift]. The `DCHECK_LE` at lines 69-70 asserts that at most kMaxWorkersPerJob - 1 ids are already in use before an acquisition, which makes the zero case unreachable in practice — but DCHECKs expand to `((void) 0)` in release builds (visible in the listing), so the analyzer cannot rule the path out. Suggested fix: make the invariant explicit on the release path as well — e.g. `CHECK_NE(~assigned_task_ids, 0u);` (or an equivalent release-mode check) before the CountTrailingZeros32 call, or use `v8::base::bits::CountTrailingZerosNonZero(~assigned_task_ids)` (bits.h line 146), whose documented non-zero precondition states the invariant at the call site; either change also silences this report. Note the precondition must truly hold — a 33rd concurrent acquisition would still be UB under either variant, so the kMaxWorkersPerJob cap (default-job.cc lines 15 and 38) remains load-bearing.

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-unknown-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name default-job.cc -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 2 -pic-is-pie -mframe-pointer=all -relaxed-aliasing -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/home/maurizio/node-v18.6.0/out -resource-dir /usr/local/lib/clang/16.0.0 -D _GLIBCXX_USE_CXX11_ABI=1 -D NODE_OPENSSL_CONF_NAME=nodejs_conf -D NODE_OPENSSL_HAS_QUIC -D V8_GYP_BUILD -D V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP=64 -D __STDC_FORMAT_MACROS -D OPENSSL_NO_PINSHARED -D OPENSSL_THREADS -D V8_TARGET_ARCH_X64 -D V8_HAVE_TARGET_OS -D V8_TARGET_OS_LINUX -D V8_EMBEDDER_STRING="-node.8" -D ENABLE_DISASSEMBLER -D V8_PROMISE_INTERNAL_FIELD_COUNT=1 -D V8_SHORT_BUILTIN_CALLS -D OBJECT_PRINT -D V8_INTL_SUPPORT -D V8_ATOMIC_OBJECT_FIELD_WRITES -D V8_ENABLE_LAZY_SOURCE_POSITIONS -D V8_USE_SIPHASH -D V8_SHARED_RO_HEAP -D V8_WIN64_UNWINDING_INFO -D V8_ENABLE_REGEXP_INTERPRETER_THREADED_DISPATCH -D V8_SNAPSHOT_COMPRESSION -D V8_ENABLE_WEBASSEMBLY -D V8_ENABLE_JAVASCRIPT_PROMISE_HOOKS -D V8_ALLOCATION_FOLDING -D V8_ALLOCATION_SITE_TRACKING -D V8_SCRIPTORMODULE_LEGACY_LIFETIME -D V8_ADVANCED_BIGINT_ALGORITHMS -I ../deps/v8 -I ../deps/v8/include -internal-isystem 
/usr/lib/gcc/x86_64-redhat-linux/8/../../../../include/c++/8 -internal-isystem /usr/lib/gcc/x86_64-redhat-linux/8/../../../../include/c++/8/x86_64-redhat-linux -internal-isystem /usr/lib/gcc/x86_64-redhat-linux/8/../../../../include/c++/8/backward -internal-isystem /usr/local/lib/clang/16.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-redhat-linux/8/../../../../x86_64-redhat-linux/include -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -Wno-unused-parameter -Wno-return-type -std=gnu++17 -fdeprecated-macro -fdebug-compilation-dir=/home/maurizio/node-v18.6.0/out -ferror-limit 19 -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-08-22-142216-507842-1 -x c++ ../deps/v8/src/libplatform/default-job.cc

../deps/v8/src/libplatform/default-job.cc

1// Copyright 2020 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/libplatform/default-job.h"
6
7#include "src/base/bits.h"
8#include "src/base/macros.h"
9
10namespace v8 {
11namespace platform {
12namespace {
13
14// Capped to allow assigning task_ids from a bitfield.
15constexpr size_t kMaxWorkersPerJob = 32;
16
17} // namespace
18
19DefaultJobState::JobDelegate::~JobDelegate() {
20 static_assert(kInvalidTaskId >= kMaxWorkersPerJob,
21 "kInvalidTaskId must be outside of the range of valid task_ids "
22 "[0, kMaxWorkersPerJob)");
23 if (task_id_ != kInvalidTaskId) outer_->ReleaseTaskId(task_id_);
24}
25
26uint8_t DefaultJobState::JobDelegate::GetTaskId() {
27 if (task_id_ == kInvalidTaskId) task_id_ = outer_->AcquireTaskId();
1
Assuming 'kInvalidTaskId' is equal to field 'task_id_'
2
Taking true branch
3
Calling 'DefaultJobState::AcquireTaskId'
28 return task_id_;
29}
30
31DefaultJobState::DefaultJobState(Platform* platform,
32 std::unique_ptr<JobTask> job_task,
33 TaskPriority priority,
34 size_t num_worker_threads)
35 : platform_(platform),
36 job_task_(std::move(job_task)),
37 priority_(priority),
38 num_worker_threads_(std::min(num_worker_threads, kMaxWorkersPerJob)) {}
39
40DefaultJobState::~DefaultJobState() { DCHECK_EQ(0U, active_workers_)((void) 0); }
41
42void DefaultJobState::NotifyConcurrencyIncrease() {
43 if (is_canceled_.load(std::memory_order_relaxed)) return;
44
45 size_t num_tasks_to_post = 0;
46 TaskPriority priority;
47 {
48 base::MutexGuard guard(&mutex_);
49 const size_t max_concurrency = CappedMaxConcurrency(active_workers_);
50 // Consider |pending_tasks_| to avoid posting too many tasks.
51 if (max_concurrency > (active_workers_ + pending_tasks_)) {
52 num_tasks_to_post = max_concurrency - active_workers_ - pending_tasks_;
53 pending_tasks_ += num_tasks_to_post;
54 }
55 priority = priority_;
56 }
57 // Post additional worker tasks to reach |max_concurrency|.
58 for (size_t i = 0; i < num_tasks_to_post; ++i) {
59 CallOnWorkerThread(priority, std::make_unique<DefaultJobWorker>(
60 shared_from_this(), job_task_.get()));
61 }
62}
63
64uint8_t DefaultJobState::AcquireTaskId() {
65 static_assert(kMaxWorkersPerJob <= sizeof(assigned_task_ids_) * 8,
66 "TaskId bitfield isn't big enough to fit kMaxWorkersPerJob.");
67 uint32_t assigned_task_ids =
68 assigned_task_ids_.load(std::memory_order_relaxed);
69 DCHECK_LE(v8::base::bits::CountPopulation(assigned_task_ids) + 1,((void) 0)
70 kMaxWorkersPerJob)((void) 0);
71 uint32_t new_assigned_task_ids = 0;
72 uint8_t task_id = 0;
73 // memory_order_acquire on success, matched with memory_order_release in
74 // ReleaseTaskId() so that operations done by previous threads that had
75 // the same task_id become visible to the current thread.
76 do {
77 // Count trailing one bits. This is the id of the right-most 0-bit in
78 // |assigned_task_ids|.
79 task_id = v8::base::bits::CountTrailingZeros32(~assigned_task_ids);
4
Calling 'CountTrailingZeros32'
11
Returning from 'CountTrailingZeros32'
12
The value 32 is assigned to 'task_id'
80 new_assigned_task_ids = assigned_task_ids | (uint32_t(1) << task_id);
13
The result of the left shift is undefined due to shifting by '32', which is greater or equal to the width of type 'uint32_t'
81 } while (!assigned_task_ids_.compare_exchange_weak(
82 assigned_task_ids, new_assigned_task_ids, std::memory_order_acquire,
83 std::memory_order_relaxed));
84 return task_id;
85}
86
87void DefaultJobState::ReleaseTaskId(uint8_t task_id) {
88 // memory_order_release to match AcquireTaskId().
89 uint32_t previous_task_ids = assigned_task_ids_.fetch_and(
90 ~(uint32_t(1) << task_id), std::memory_order_release);
91 DCHECK(previous_task_ids & (uint32_t(1) << task_id))((void) 0);
92 USE(previous_task_ids)do { ::v8::base::Use unused_tmp_array_for_use_macro[]{previous_task_ids
}; (void)unused_tmp_array_for_use_macro; } while (false)
;
93}
94
95void DefaultJobState::Join() {
96 bool can_run = false;
97 {
98 base::MutexGuard guard(&mutex_);
99 priority_ = TaskPriority::kUserBlocking;
100 // Reserve a worker for the joining thread. GetMaxConcurrency() is ignored
101 // here, but WaitForParticipationOpportunityLockRequired() waits for
102 // workers to return if necessary so we don't exceed GetMaxConcurrency().
103 num_worker_threads_ = platform_->NumberOfWorkerThreads() + 1;
104 ++active_workers_;
105 can_run = WaitForParticipationOpportunityLockRequired();
106 }
107 DefaultJobState::JobDelegate delegate(this, true);
108 while (can_run) {
109 job_task_->Run(&delegate);
110 base::MutexGuard guard(&mutex_);
111 can_run = WaitForParticipationOpportunityLockRequired();
112 }
113}
114
115void DefaultJobState::CancelAndWait() {
116 {
117 base::MutexGuard guard(&mutex_);
118 is_canceled_.store(true, std::memory_order_relaxed);
119 while (active_workers_ > 0) {
120 worker_released_condition_.Wait(&mutex_);
121 }
122 }
123}
124
125void DefaultJobState::CancelAndDetach() {
126 is_canceled_.store(true, std::memory_order_relaxed);
127}
128
129bool DefaultJobState::IsActive() {
130 base::MutexGuard guard(&mutex_);
131 return job_task_->GetMaxConcurrency(active_workers_) != 0 ||
132 active_workers_ != 0;
133}
134
135bool DefaultJobState::CanRunFirstTask() {
136 base::MutexGuard guard(&mutex_);
137 --pending_tasks_;
138 if (is_canceled_.load(std::memory_order_relaxed)) return false;
139 if (active_workers_ >= std::min(job_task_->GetMaxConcurrency(active_workers_),
140 num_worker_threads_)) {
141 return false;
142 }
143 // Acquire current worker.
144 ++active_workers_;
145 return true;
146}
147
148bool DefaultJobState::DidRunTask() {
149 size_t num_tasks_to_post = 0;
150 TaskPriority priority;
151 {
152 base::MutexGuard guard(&mutex_);
153 const size_t max_concurrency = CappedMaxConcurrency(active_workers_ - 1);
154 if (is_canceled_.load(std::memory_order_relaxed) ||
155 active_workers_ > max_concurrency) {
156 // Release current worker and notify.
157 --active_workers_;
158 worker_released_condition_.NotifyOne();
159 return false;
160 }
161 // Consider |pending_tasks_| to avoid posting too many tasks.
162 if (max_concurrency > active_workers_ + pending_tasks_) {
163 num_tasks_to_post = max_concurrency - active_workers_ - pending_tasks_;
164 pending_tasks_ += num_tasks_to_post;
165 }
166 priority = priority_;
167 }
168 // Post additional worker tasks to reach |max_concurrency| in the case that
169 // max concurrency increased. This is not strictly necessary, since
170 // NotifyConcurrencyIncrease() should eventually be invoked. However, some
171 // users of PostJob() batch work and tend to call NotifyConcurrencyIncrease()
172 // late. Posting here allows us to spawn new workers sooner.
173 for (size_t i = 0; i < num_tasks_to_post; ++i) {
174 CallOnWorkerThread(priority, std::make_unique<DefaultJobWorker>(
175 shared_from_this(), job_task_.get()));
176 }
177 return true;
178}
179
180bool DefaultJobState::WaitForParticipationOpportunityLockRequired() {
181 size_t max_concurrency = CappedMaxConcurrency(active_workers_ - 1);
182 while (active_workers_ > max_concurrency && active_workers_ > 1) {
183 worker_released_condition_.Wait(&mutex_);
184 max_concurrency = CappedMaxConcurrency(active_workers_ - 1);
185 }
186 if (active_workers_ <= max_concurrency) return true;
187 DCHECK_EQ(1U, active_workers_)((void) 0);
188 DCHECK_EQ(0U, max_concurrency)((void) 0);
189 active_workers_ = 0;
190 is_canceled_.store(true, std::memory_order_relaxed);
191 return false;
192}
193
194size_t DefaultJobState::CappedMaxConcurrency(size_t worker_count) const {
195 return std::min(job_task_->GetMaxConcurrency(worker_count),
196 num_worker_threads_);
197}
198
199void DefaultJobState::CallOnWorkerThread(TaskPriority priority,
200 std::unique_ptr<Task> task) {
201 switch (priority) {
202 case TaskPriority::kBestEffort:
203 return platform_->CallLowPriorityTaskOnWorkerThread(std::move(task));
204 case TaskPriority::kUserVisible:
205 return platform_->CallOnWorkerThread(std::move(task));
206 case TaskPriority::kUserBlocking:
207 return platform_->CallBlockingTaskOnWorkerThread(std::move(task));
208 }
209}
210
211void DefaultJobState::UpdatePriority(TaskPriority priority) {
212 base::MutexGuard guard(&mutex_);
213 priority_ = priority;
214}
215
216DefaultJobHandle::DefaultJobHandle(std::shared_ptr<DefaultJobState> state)
217 : state_(std::move(state)) {
218 state_->NotifyConcurrencyIncrease();
219}
220
221DefaultJobHandle::~DefaultJobHandle() { DCHECK_EQ(nullptr, state_)((void) 0); }
222
223void DefaultJobHandle::Join() {
224 state_->Join();
225 state_ = nullptr;
226}
227void DefaultJobHandle::Cancel() {
228 state_->CancelAndWait();
229 state_ = nullptr;
230}
231
232void DefaultJobHandle::CancelAndDetach() {
233 state_->CancelAndDetach();
234 state_ = nullptr;
235}
236
237bool DefaultJobHandle::IsActive() { return state_->IsActive(); }
238
239void DefaultJobHandle::UpdatePriority(TaskPriority priority) {
240 state_->UpdatePriority(priority);
241}
242
243} // namespace platform
244} // namespace v8

../deps/v8/src/base/bits.h

1// Copyright 2014 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_BASE_BITS_H_
6#define V8_BASE_BITS_H_
7
8#include <stdint.h>
9#include <type_traits>
10
11#include "src/base/base-export.h"
12#include "src/base/macros.h"
13#if V8_CC_MSVC
14#include <intrin.h>
15#endif
16#if V8_OS_WIN32
17#include "src/base/win32-headers.h"
18#endif
19
20namespace v8 {
21namespace base {
22namespace bits {
23
24// CountPopulation(value) returns the number of bits set in |value|.
25template <typename T>
26constexpr inline
27 typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 8,
28 unsigned>::type
29 CountPopulation(T value) {
30 STATIC_ASSERT(sizeof(T) <= 8)static_assert(sizeof(T) <= 8, "sizeof(T) <= 8");
31#if V8_HAS_BUILTIN_POPCOUNT(1)
32 return sizeof(T) == 8 ? __builtin_popcountll(static_cast<uint64_t>(value))
33 : __builtin_popcount(static_cast<uint32_t>(value));
34#else
35 // Fall back to divide-and-conquer popcount (see "Hacker's Delight" by Henry
36 // S. Warren, Jr.), chapter 5-1.
37 constexpr uint64_t mask[] = {0x5555555555555555, 0x3333333333333333,
38 0x0f0f0f0f0f0f0f0f};
39 // Start with 64 buckets of 1 bits, holding values from [0,1].
40 value = ((value >> 1) & mask[0]) + (value & mask[0]);
41 // Having 32 buckets of 2 bits, holding values from [0,2] now.
42 value = ((value >> 2) & mask[1]) + (value & mask[1]);
43 // Having 16 buckets of 4 bits, holding values from [0,4] now.
44 value = ((value >> 4) & mask[2]) + (value & mask[2]);
45 // Having 8 buckets of 8 bits, holding values from [0,8] now.
46 // From this point on, the buckets are bigger than the number of bits
47 // required to hold the values, and the buckets are bigger the maximum
48 // result, so there's no need to mask value anymore, since there's no
49 // more risk of overflow between buckets.
50 if (sizeof(T) > 1) value = (value >> (sizeof(T) > 1 ? 8 : 0)) + value;
51 // Having 4 buckets of 16 bits, holding values from [0,16] now.
52 if (sizeof(T) > 2) value = (value >> (sizeof(T) > 2 ? 16 : 0)) + value;
53 // Having 2 buckets of 32 bits, holding values from [0,32] now.
54 if (sizeof(T) > 4) value = (value >> (sizeof(T) > 4 ? 32 : 0)) + value;
55 // Having 1 buckets of 64 bits, holding values from [0,64] now.
56 return static_cast<unsigned>(value & 0xff);
57#endif
58}
59
60// ReverseBits(value) returns |value| in reverse bit order.
61template <typename T>
62T ReverseBits(T value) {
63 STATIC_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||static_assert((sizeof(value) == 1) || (sizeof(value) == 2) ||
(sizeof(value) == 4) || (sizeof(value) == 8), "(sizeof(value) == 1) || (sizeof(value) == 2) || (sizeof(value) == 4) || (sizeof(value) == 8)"
)
64 (sizeof(value) == 4) || (sizeof(value) == 8))static_assert((sizeof(value) == 1) || (sizeof(value) == 2) ||
(sizeof(value) == 4) || (sizeof(value) == 8), "(sizeof(value) == 1) || (sizeof(value) == 2) || (sizeof(value) == 4) || (sizeof(value) == 8)"
)
;
65 T result = 0;
66 for (unsigned i = 0; i < (sizeof(value) * 8); i++) {
67 result = (result << 1) | (value & 1);
68 value >>= 1;
69 }
70 return result;
71}
72
73// CountLeadingZeros(value) returns the number of zero bits following the most
74// significant 1 bit in |value| if |value| is non-zero, otherwise it returns
75// {sizeof(T) * 8}.
76template <typename T, unsigned bits = sizeof(T) * 8>
77inline constexpr
78 typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 8,
79 unsigned>::type
80 CountLeadingZeros(T value) {
81 static_assert(bits > 0, "invalid instantiation");
82#if V8_HAS_BUILTIN_CLZ(1)
83 return value == 0
84 ? bits
85 : bits == 64
86 ? __builtin_clzll(static_cast<uint64_t>(value))
87 : __builtin_clz(static_cast<uint32_t>(value)) - (32 - bits);
88#else
89 // Binary search algorithm taken from "Hacker's Delight" (by Henry S. Warren,
90 // Jr.), figures 5-11 and 5-12.
91 if (bits == 1) return static_cast<unsigned>(value) ^ 1;
92 T upper_half = value >> (bits / 2);
93 T next_value = upper_half != 0 ? upper_half : value;
94 unsigned add = upper_half != 0 ? 0 : bits / 2;
95 constexpr unsigned next_bits = bits == 1 ? 1 : bits / 2;
96 return CountLeadingZeros<T, next_bits>(next_value) + add;
97#endif
98}
99
100inline constexpr unsigned CountLeadingZeros32(uint32_t value) {
101 return CountLeadingZeros(value);
102}
103inline constexpr unsigned CountLeadingZeros64(uint64_t value) {
104 return CountLeadingZeros(value);
105}
106
107// CountTrailingZeros(value) returns the number of zero bits preceding the
108// least significant 1 bit in |value| if |value| is non-zero, otherwise it
109// returns {sizeof(T) * 8}.
110// See CountTrailingZerosNonZero for an optimized version for the case that
111// |value| is guaranteed to be non-zero.
112template <typename T, unsigned bits = sizeof(T) * 8>
113inline constexpr
114 typename std::enable_if<std::is_integral<T>::value && sizeof(T) <= 8,
115 unsigned>::type
116 CountTrailingZeros(T value) {
117#if V8_HAS_BUILTIN_CTZ(1)
118 return value == 0 ? bits
6
Assuming 'value' is equal to 0
7
'?' condition is true
8
Returning the value 32
119 : bits == 64 ? __builtin_ctzll(static_cast<uint64_t>(value))
120 : __builtin_ctz(static_cast<uint32_t>(value));
121#else
122 // Fall back to popcount (see "Hacker's Delight" by Henry S. Warren, Jr.),
123 // chapter 5-4. On x64, since is faster than counting in a loop and faster
124 // than doing binary search.
125 using U = typename std::make_unsigned<T>::type;
126 U u = value;
127 return CountPopulation(static_cast<U>(~u & (u - 1u)));
128#endif
129}
130
131inline constexpr unsigned CountTrailingZeros32(uint32_t value) {
132 return CountTrailingZeros(value);
5
Calling 'CountTrailingZeros<unsigned int, 32U>'
9
Returning from 'CountTrailingZeros<unsigned int, 32U>'
10
Returning the value 32
133}
134inline constexpr unsigned CountTrailingZeros64(uint64_t value) {
135 return CountTrailingZeros(value);
136}
137
138// CountTrailingZerosNonZero(value) returns the number of zero bits preceding
139// the least significant 1 bit in |value| if |value| is non-zero, otherwise the
140// behavior is undefined.
141// See CountTrailingZeros for an alternative version that allows |value| == 0.
142template <typename T, unsigned bits = sizeof(T) * 8>
143inline constexpr
144 typename std::enable_if<std::is_integral<T>::value && sizeof(T) <= 8,
145 unsigned>::type
146 CountTrailingZerosNonZero(T value) {
147 DCHECK_NE(0, value)((void) 0);
148#if V8_HAS_BUILTIN_CTZ(1)
149 return bits == 64 ? __builtin_ctzll(static_cast<uint64_t>(value))
150 : __builtin_ctz(static_cast<uint32_t>(value));
151#else
152 return CountTrailingZeros<T, bits>(value);
153#endif
154}
155
156// Returns true iff |value| is a power of 2.
157template <typename T,
158 typename = typename std::enable_if<std::is_integral<T>::value ||
159 std::is_enum<T>::value>::type>
160constexpr inline bool IsPowerOfTwo(T value) {
161 return value > 0 && (value & (value - 1)) == 0;
162}
163
164// Identical to {CountTrailingZeros}, but only works for powers of 2.
165template <typename T,
166 typename = typename std::enable_if<std::is_integral<T>::value>::type>
167inline constexpr int WhichPowerOfTwo(T value) {
168 DCHECK(IsPowerOfTwo(value))((void) 0);
169#if V8_HAS_BUILTIN_CTZ(1)
170 STATIC_ASSERT(sizeof(T) <= 8)static_assert(sizeof(T) <= 8, "sizeof(T) <= 8");
171 return sizeof(T) == 8 ? __builtin_ctzll(static_cast<uint64_t>(value))
172 : __builtin_ctz(static_cast<uint32_t>(value));
173#else
174 // Fall back to popcount (see "Hacker's Delight" by Henry S. Warren, Jr.),
175 // chapter 5-4. On x64, since is faster than counting in a loop and faster
176 // than doing binary search.
177 using U = typename std::make_unsigned<T>::type;
178 U u = value;
179 return CountPopulation(static_cast<U>(u - 1));
180#endif
181}
182
183// RoundUpToPowerOfTwo32(value) returns the smallest power of two which is
184// greater than or equal to |value|. If you pass in a |value| that is already a
185// power of two, it is returned as is. |value| must be less than or equal to
186// 0x80000000u. Uses computation based on leading zeros if we have compiler
187// support for that. Falls back to the implementation from "Hacker's Delight" by
188// Henry S. Warren, Jr., figure 3-3, page 48, where the function is called clp2.
189V8_BASE_EXPORT uint32_t RoundUpToPowerOfTwo32(uint32_t value);
190// Same for 64 bit integers. |value| must be <= 2^63
191V8_BASE_EXPORT uint64_t RoundUpToPowerOfTwo64(uint64_t value);
192// Same for size_t integers.
193inline size_t RoundUpToPowerOfTwo(size_t value) {
194 if (sizeof(size_t) == sizeof(uint64_t)) {
195 return RoundUpToPowerOfTwo64(value);
196 } else {
197 // Without windows.h included this line triggers a truncation warning on
198 // 64-bit builds. Presumably windows.h disables the relevant warning.
199 return RoundUpToPowerOfTwo32(static_cast<uint32_t>(value));
200 }
201}
202
203// RoundDownToPowerOfTwo32(value) returns the greatest power of two which is
204// less than or equal to |value|. If you pass in a |value| that is already a
205// power of two, it is returned as is.
206inline uint32_t RoundDownToPowerOfTwo32(uint32_t value) {
207 if (value > 0x80000000u) return 0x80000000u;
208 uint32_t result = RoundUpToPowerOfTwo32(value);
209 if (result > value) result >>= 1;
210 return result;
211}
212
213
214// Precondition: 0 <= shift < 32
215inline constexpr uint32_t RotateRight32(uint32_t value, uint32_t shift) {
216 return (value >> shift) | (value << ((32 - shift) & 31));
217}
218
219// Precondition: 0 <= shift < 32
220inline constexpr uint32_t RotateLeft32(uint32_t value, uint32_t shift) {
221 return (value << shift) | (value >> ((32 - shift) & 31));
222}
223
224// Precondition: 0 <= shift < 64
225inline constexpr uint64_t RotateRight64(uint64_t value, uint64_t shift) {
226 return (value >> shift) | (value << ((64 - shift) & 63));
227}
228
229// Precondition: 0 <= shift < 64
230inline constexpr uint64_t RotateLeft64(uint64_t value, uint64_t shift) {
231 return (value << shift) | (value >> ((64 - shift) & 63));
232}
233
234// SignedAddOverflow32(lhs,rhs,val) performs a signed summation of |lhs| and
235// |rhs| and stores the result into the variable pointed to by |val| and
236// returns true if the signed summation resulted in an overflow.
237inline bool SignedAddOverflow32(int32_t lhs, int32_t rhs, int32_t* val) {
238#if V8_HAS_BUILTIN_SADD_OVERFLOW(1)
239 return __builtin_sadd_overflow(lhs, rhs, val);
240#else
241 uint32_t res = static_cast<uint32_t>(lhs) + static_cast<uint32_t>(rhs);
242 *val = bit_cast<int32_t>(res);
243 return ((res ^ lhs) & (res ^ rhs) & (1U << 31)) != 0;
244#endif
245}
246
247
248// SignedSubOverflow32(lhs,rhs,val) performs a signed subtraction of |lhs| and
249// |rhs| and stores the result into the variable pointed to by |val| and
250// returns true if the signed subtraction resulted in an overflow.
251inline bool SignedSubOverflow32(int32_t lhs, int32_t rhs, int32_t* val) {
252#if V8_HAS_BUILTIN_SSUB_OVERFLOW(1)
253 return __builtin_ssub_overflow(lhs, rhs, val);
254#else
255 uint32_t res = static_cast<uint32_t>(lhs) - static_cast<uint32_t>(rhs);
256 *val = bit_cast<int32_t>(res);
257 return ((res ^ lhs) & (res ^ ~rhs) & (1U << 31)) != 0;
258#endif
259}
260
261// SignedMulOverflow32(lhs,rhs,val) performs a signed multiplication of |lhs|
262// and |rhs| and stores the result into the variable pointed to by |val| and
263// returns true if the signed multiplication resulted in an overflow.
264V8_BASE_EXPORT bool SignedMulOverflow32(int32_t lhs, int32_t rhs, int32_t* val);
265
266// SignedAddOverflow64(lhs,rhs,val) performs a signed summation of |lhs| and
267// |rhs| and stores the result into the variable pointed to by |val| and
268// returns true if the signed summation resulted in an overflow.
269inline bool SignedAddOverflow64(int64_t lhs, int64_t rhs, int64_t* val) {
270 uint64_t res = static_cast<uint64_t>(lhs) + static_cast<uint64_t>(rhs);
271 *val = bit_cast<int64_t>(res);
272 return ((res ^ lhs) & (res ^ rhs) & (1ULL << 63)) != 0;
273}
274
275
276// SignedSubOverflow64(lhs,rhs,val) performs a signed subtraction of |lhs| and
277// |rhs| and stores the result into the variable pointed to by |val| and
278// returns true if the signed subtraction resulted in an overflow.
279inline bool SignedSubOverflow64(int64_t lhs, int64_t rhs, int64_t* val) {
280 uint64_t res = static_cast<uint64_t>(lhs) - static_cast<uint64_t>(rhs);
281 *val = bit_cast<int64_t>(res);
282 return ((res ^ lhs) & (res ^ ~rhs) & (1ULL << 63)) != 0;
283}
284
285// SignedMulHigh32(lhs, rhs) multiplies two signed 32-bit values |lhs| and
286// |rhs|, extracts the most significant 32 bits of the result, and returns
287// those.
288V8_BASE_EXPORT int32_t SignedMulHigh32(int32_t lhs, int32_t rhs);
289
290// SignedMulHighAndAdd32(lhs, rhs, acc) multiplies two signed 32-bit values
291// |lhs| and |rhs|, extracts the most significant 32 bits of the result, and
292// adds the accumulate value |acc|.
293V8_BASE_EXPORT int32_t SignedMulHighAndAdd32(int32_t lhs, int32_t rhs,
294 int32_t acc);
295
296// SignedDiv32(lhs, rhs) divides |lhs| by |rhs| and returns the quotient
297// truncated to int32. If |rhs| is zero, then zero is returned. If |lhs|
298// is minint and |rhs| is -1, it returns minint.
299V8_BASE_EXPORT int32_t SignedDiv32(int32_t lhs, int32_t rhs);
300
301// SignedMod32(lhs, rhs) divides |lhs| by |rhs| and returns the remainder
302// truncated to int32. If either |rhs| is zero or |lhs| is minint and |rhs|
303// is -1, it returns zero.
304V8_BASE_EXPORT int32_t SignedMod32(int32_t lhs, int32_t rhs);
305
306// UnsignedAddOverflow32(lhs,rhs,val) performs an unsigned summation of |lhs|
307// and |rhs| and stores the result into the variable pointed to by |val| and
308// returns true if the unsigned summation resulted in an overflow.
309inline bool UnsignedAddOverflow32(uint32_t lhs, uint32_t rhs, uint32_t* val) {
310#if V8_HAS_BUILTIN_SADD_OVERFLOW(1)
311 return __builtin_uadd_overflow(lhs, rhs, val);
312#else
313 *val = lhs + rhs;
314 return *val < (lhs | rhs);
315#endif
316}
317
318
319// UnsignedDiv32(lhs, rhs) divides |lhs| by |rhs| and returns the quotient
320// truncated to uint32. If |rhs| is zero, then zero is returned.
321inline uint32_t UnsignedDiv32(uint32_t lhs, uint32_t rhs) {
322 return rhs ? lhs / rhs : 0u;
323}
324
325
326// UnsignedMod32(lhs, rhs) divides |lhs| by |rhs| and returns the remainder
327// truncated to uint32. If |rhs| is zero, then zero is returned.
328inline uint32_t UnsignedMod32(uint32_t lhs, uint32_t rhs) {
329 return rhs ? lhs % rhs : 0u;
330}
331
332// Wraparound integer arithmetic without undefined behavior.
333
334inline int32_t WraparoundAdd32(int32_t lhs, int32_t rhs) {
335 return static_cast<int32_t>(static_cast<uint32_t>(lhs) +
336 static_cast<uint32_t>(rhs));
337}
338
339inline int32_t WraparoundNeg32(int32_t x) {
340 return static_cast<int32_t>(-static_cast<uint32_t>(x));
341}
342
343// SignedSaturatedAdd64(lhs, rhs) adds |lhs| and |rhs|,
344// checks and returns the result.
345V8_BASE_EXPORT int64_t SignedSaturatedAdd64(int64_t lhs, int64_t rhs);
346
347// SignedSaturatedSub64(lhs, rhs) subtracts |lhs| by |rhs|,
348// checks and returns the result.
349V8_BASE_EXPORT int64_t SignedSaturatedSub64(int64_t lhs, int64_t rhs);
350
351} // namespace bits
352} // namespace base
353} // namespace v8
354
355#endif // V8_BASE_BITS_H_