Bug Summary

File: out/../deps/v8/src/compiler/machine-operator-reducer.cc
Warning: line 269, column 11
Assigned value is garbage or undefined
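The flagged statement is 'divisor >>= shift;' at source line 269 in MachineOperatorReducer::Uint32Div. The analyzer's path (steps 1-7 in the listing below) shows 'shift' initialized to 32 by CountTrailingZeros, which only happens for a zero divisor. Because the DCHECK_LT(0u, divisor) guard compiles to a no-op outside debug builds, the analyzer does not rule that case out, and a shift count equal to the 32-bit operand width is undefined behaviour in C++. A minimal stand-alone sketch of the flagged pattern, with made-up names rather than the V8 helpers:

#include <cstdint>

// Counts trailing zero bits, returning the full width (32) for x == 0,
// which is the case the analyzer assumes when it initializes 'shift' to 32.
unsigned CountTrailingZeros32(uint32_t x) {
  if (x == 0) return 32;
  unsigned n = 0;
  while ((x & 1u) == 0) {
    x >>= 1;
    ++n;
  }
  return n;
}

uint32_t ShiftOutEvenFactors(uint32_t divisor) {
  unsigned shift = CountTrailingZeros32(divisor);  // 32 if divisor == 0
  divisor >>= shift;  // undefined behaviour when shift == 32: the shift count
                      // equals the width of the promoted operand
  return divisor;
}

Whether a zero divisor can actually reach Uint32Div depends on its callers; an explicit check on the divisor before the shift would be one way to make the invariant visible to the analyzer.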

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-unknown-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name machine-operator-reducer.cc -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 2 -pic-is-pie -mframe-pointer=all -relaxed-aliasing -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/home/maurizio/node-v18.6.0/out -resource-dir /usr/local/lib/clang/16.0.0 -D _GLIBCXX_USE_CXX11_ABI=1 -D NODE_OPENSSL_CONF_NAME=nodejs_conf -D NODE_OPENSSL_HAS_QUIC -D V8_GYP_BUILD -D V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP=64 -D __STDC_FORMAT_MACROS -D OPENSSL_NO_PINSHARED -D OPENSSL_THREADS -D V8_TARGET_ARCH_X64 -D V8_HAVE_TARGET_OS -D V8_TARGET_OS_LINUX -D V8_EMBEDDER_STRING="-node.8" -D ENABLE_DISASSEMBLER -D V8_PROMISE_INTERNAL_FIELD_COUNT=1 -D V8_SHORT_BUILTIN_CALLS -D OBJECT_PRINT -D V8_INTL_SUPPORT -D V8_ATOMIC_OBJECT_FIELD_WRITES -D V8_ENABLE_LAZY_SOURCE_POSITIONS -D V8_USE_SIPHASH -D V8_SHARED_RO_HEAP -D V8_WIN64_UNWINDING_INFO -D V8_ENABLE_REGEXP_INTERPRETER_THREADED_DISPATCH -D V8_SNAPSHOT_COMPRESSION -D V8_ENABLE_WEBASSEMBLY -D V8_ENABLE_JAVASCRIPT_PROMISE_HOOKS -D V8_ALLOCATION_FOLDING -D V8_ALLOCATION_SITE_TRACKING -D V8_SCRIPTORMODULE_LEGACY_LIFETIME -D V8_ADVANCED_BIGINT_ALGORITHMS -D UCONFIG_NO_SERVICE=1 -D U_ENABLE_DYLOAD=0 -D U_STATIC_IMPLEMENTATION=1 -D U_HAVE_STD_STRING=1 -D UCONFIG_NO_BREAK_ITERATION=0 -I ../deps/v8 -I ../deps/v8/include -I /home/maurizio/node-v18.6.0/out/Release/obj/gen/generate-bytecode-output-root -I /home/maurizio/node-v18.6.0/out/Release/obj/gen -I ../deps/icu-small/source/i18n -I ../deps/icu-small/source/common -internal-isystem /usr/lib/gcc/x86_64-redhat-linux/8/../../../../include/c++/8 -internal-isystem /usr/lib/gcc/x86_64-redhat-linux/8/../../../../include/c++/8/x86_64-redhat-linux -internal-isystem /usr/lib/gcc/x86_64-redhat-linux/8/../../../../include/c++/8/backward -internal-isystem /usr/local/lib/clang/16.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-redhat-linux/8/../../../../x86_64-redhat-linux/include -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -Wno-unused-parameter -Wno-return-type -std=gnu++17 -fdeprecated-macro -fdebug-compilation-dir=/home/maurizio/node-v18.6.0/out -ferror-limit 19 -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-08-22-142216-507842-1 -x c++ ../deps/v8/src/compiler/machine-operator-reducer.cc

../deps/v8/src/compiler/machine-operator-reducer.cc

1// Copyright 2014 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/compiler/machine-operator-reducer.h"
6
7#include <cmath>
8#include <limits>
9
10#include "src/base/bits.h"
11#include "src/base/division-by-constant.h"
12#include "src/base/ieee754.h"
13#include "src/base/logging.h"
14#include "src/base/overflowing-math.h"
15#include "src/codegen/tnode.h"
16#include "src/compiler/diamond.h"
17#include "src/compiler/graph.h"
18#include "src/compiler/js-operator.h"
19#include "src/compiler/machine-graph.h"
20#include "src/compiler/node-matchers.h"
21#include "src/compiler/node-properties.h"
22#include "src/compiler/opcodes.h"
23#include "src/numbers/conversions-inl.h"
24
25namespace v8 {
26namespace internal {
27namespace compiler {
28
29// Some optimizations performed by the MachineOperatorReducer can be applied
30// to both Word32 and Word64 operations. Those are implemented in a generic
31// way to be reused for different word sizes.
32// This class adapts a generic algorithm to Word32 operations.
33class Word32Adapter {
34 public:
35 using IntNBinopMatcher = Int32BinopMatcher;
36 using UintNBinopMatcher = Uint32BinopMatcher;
37 using intN_t = int32_t;
38 // WORD_SIZE refers to the N for which this adapter specializes.
39 static constexpr std::size_t WORD_SIZE = 32;
40
41 explicit Word32Adapter(MachineOperatorReducer* reducer) : r_(reducer) {}
42
43 template <typename T>
44 static bool IsWordNAnd(const T& x) {
45 return x.IsWord32And();
46 }
47 template <typename T>
48 static bool IsWordNShl(const T& x) {
49 return x.IsWord32Shl();
50 }
51 template <typename T>
52 static bool IsWordNShr(const T& x) {
53 return x.IsWord32Shr();
54 }
55 template <typename T>
56 static bool IsWordNSar(const T& x) {
57 return x.IsWord32Sar();
58 }
59 template <typename T>
60 static bool IsWordNXor(const T& x) {
61 return x.IsWord32Xor();
62 }
63 template <typename T>
64 static bool IsIntNAdd(const T& x) {
65 return x.IsInt32Add();
66 }
67 template <typename T>
68 static bool IsIntNMul(const T& x) {
69 return x.IsInt32Mul();
70 }
71
72 const Operator* IntNAdd(MachineOperatorBuilder* machine) {
73 return machine->Int32Add();
74 }
75
76 Reduction ReplaceIntN(int32_t value) { return r_->ReplaceInt32(value); }
77 Reduction ReduceWordNAnd(Node* node) { return r_->ReduceWord32And(node); }
78 Reduction ReduceIntNAdd(Node* node) { return r_->ReduceInt32Add(node); }
79 Reduction TryMatchWordNRor(Node* node) { return r_->TryMatchWord32Ror(node); }
80
81 Node* IntNConstant(int32_t value) { return r_->Int32Constant(value); }
82 Node* UintNConstant(uint32_t value) { return r_->Uint32Constant(value); }
83 Node* WordNAnd(Node* lhs, Node* rhs) { return r_->Word32And(lhs, rhs); }
84
85 private:
86 MachineOperatorReducer* r_;
87};
88
89// Some optimizations performed by the MachineOperatorReducer can be applied
90// to both Word32 and Word64 operations. Those are implemented in a generic
91// way to be reused for different word sizes.
92// This class adapts a generic algorithm to Word64 operations.
93class Word64Adapter {
94 public:
95 using IntNBinopMatcher = Int64BinopMatcher;
96 using UintNBinopMatcher = Uint64BinopMatcher;
97 using intN_t = int64_t;
98 // WORD_SIZE refers to the N for which this adapter specializes.
99 static constexpr std::size_t WORD_SIZE = 64;
100
101 explicit Word64Adapter(MachineOperatorReducer* reducer) : r_(reducer) {}
102
103 template <typename T>
104 static bool IsWordNAnd(const T& x) {
105 return x.IsWord64And();
106 }
107 template <typename T>
108 static bool IsWordNShl(const T& x) {
109 return x.IsWord64Shl();
110 }
111 template <typename T>
112 static bool IsWordNShr(const T& x) {
113 return x.IsWord64Shr();
114 }
115 template <typename T>
116 static bool IsWordNSar(const T& x) {
117 return x.IsWord64Sar();
118 }
119 template <typename T>
120 static bool IsWordNXor(const T& x) {
121 return x.IsWord64Xor();
122 }
123 template <typename T>
124 static bool IsIntNAdd(const T& x) {
125 return x.IsInt64Add();
126 }
127 template <typename T>
128 static bool IsIntNMul(const T& x) {
129 return x.IsInt64Mul();
130 }
131
132 static const Operator* IntNAdd(MachineOperatorBuilder* machine) {
133 return machine->Int64Add();
134 }
135
136 Reduction ReplaceIntN(int64_t value) { return r_->ReplaceInt64(value); }
137 Reduction ReduceWordNAnd(Node* node) { return r_->ReduceWord64And(node); }
138 Reduction ReduceIntNAdd(Node* node) { return r_->ReduceInt64Add(node); }
139 Reduction TryMatchWordNRor(Node* node) {
140 // TODO(nicohartmann@): Add a MachineOperatorReducer::TryMatchWord64Ror.
141 return r_->NoChange();
142 }
143
144 Node* IntNConstant(int64_t value) { return r_->Int64Constant(value); }
145 Node* UintNConstant(uint64_t value) { return r_->Uint64Constant(value); }
146 Node* WordNAnd(Node* lhs, Node* rhs) { return r_->Word64And(lhs, rhs); }
147
148 private:
149 MachineOperatorReducer* r_;
150};
151
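The two adapters above expose the same interface (IntNBinopMatcher, intN_t, WORD_SIZE, ReplaceIntN, ReduceWordNAnd, ...), which is what lets a reduction be written once and reused for both word sizes. A minimal sketch of that pattern, using a hypothetical helper that is not part of this file and only calls adapter members shown above:

// Hypothetical illustration: constant-folds an IntN add through the adapter,
// so the same body serves Word32Adapter (int32_t) and Word64Adapter (int64_t).
template <typename WordNAdapter>
Reduction ReduceIntNAddGeneric(WordNAdapter a, Node* node) {
  typename WordNAdapter::IntNBinopMatcher m(node);
  if (m.IsFoldable()) {  // K + K => K
    typename WordNAdapter::intN_t sum = base::AddWithWraparound(
        m.left().ResolvedValue(), m.right().ResolvedValue());
    return a.ReplaceIntN(sum);
  }
  return a.ReduceIntNAdd(node);  // defer to the word-size-specific reduction
}

Adapter members such as ReduceWordNAnd and TryMatchWordNRor exist to be called from generic code of this shape.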
152namespace {
153
154// TODO(jgruber): Consider replacing all uses of this function by
155// std::numeric_limits<T>::quiet_NaN().
156template <class T>
157T SilenceNaN(T x) {
158 DCHECK(std::isnan(x));
159 // Do some calculation to make a signalling NaN quiet.
160 return x - x;
161}
162
163} // namespace
164
165MachineOperatorReducer::MachineOperatorReducer(Editor* editor,
166 MachineGraph* mcgraph,
167 bool allow_signalling_nan)
168 : AdvancedReducer(editor),
169 mcgraph_(mcgraph),
170 allow_signalling_nan_(allow_signalling_nan) {}
171
172MachineOperatorReducer::~MachineOperatorReducer() = default;
173
174
175Node* MachineOperatorReducer::Float32Constant(volatile float value) {
176 return graph()->NewNode(common()->Float32Constant(value));
177}
178
179Node* MachineOperatorReducer::Float64Constant(volatile double value) {
180 return mcgraph()->Float64Constant(value);
181}
182
183Node* MachineOperatorReducer::Int32Constant(int32_t value) {
184 return mcgraph()->Int32Constant(value);
185}
186
187Node* MachineOperatorReducer::Int64Constant(int64_t value) {
188 return graph()->NewNode(common()->Int64Constant(value));
189}
190
191Node* MachineOperatorReducer::Float64Mul(Node* lhs, Node* rhs) {
192 return graph()->NewNode(machine()->Float64Mul(), lhs, rhs);
193}
194
195Node* MachineOperatorReducer::Float64PowHalf(Node* value) {
196 value =
197 graph()->NewNode(machine()->Float64Add(), Float64Constant(0.0), value);
198 Diamond d(graph(), common(),
199 graph()->NewNode(machine()->Float64LessThanOrEqual(), value,
200 Float64Constant(-V8_INFINITY)),
201 BranchHint::kFalse);
202 return d.Phi(MachineRepresentation::kFloat64, Float64Constant(V8_INFINITY),
203 graph()->NewNode(machine()->Float64Sqrt(), value));
204}
205
206Node* MachineOperatorReducer::Word32And(Node* lhs, Node* rhs) {
207 Node* const node = graph()->NewNode(machine()->Word32And(), lhs, rhs);
208 Reduction const reduction = ReduceWord32And(node);
209 return reduction.Changed() ? reduction.replacement() : node;
210}
211
212Node* MachineOperatorReducer::Word32Sar(Node* lhs, uint32_t rhs) {
213 if (rhs == 0) return lhs;
214 return graph()->NewNode(machine()->Word32Sar(), lhs, Uint32Constant(rhs));
215}
216
217Node* MachineOperatorReducer::Word32Shr(Node* lhs, uint32_t rhs) {
218 if (rhs == 0) return lhs;
219 return graph()->NewNode(machine()->Word32Shr(), lhs, Uint32Constant(rhs));
220}
221
222Node* MachineOperatorReducer::Word32Equal(Node* lhs, Node* rhs) {
223 return graph()->NewNode(machine()->Word32Equal(), lhs, rhs);
224}
225
226Node* MachineOperatorReducer::Word64And(Node* lhs, Node* rhs) {
227 Node* const node = graph()->NewNode(machine()->Word64And(), lhs, rhs);
228 Reduction const reduction = ReduceWord64And(node);
229 return reduction.Changed() ? reduction.replacement() : node;
230}
231
232Node* MachineOperatorReducer::Int32Add(Node* lhs, Node* rhs) {
233 Node* const node = graph()->NewNode(machine()->Int32Add(), lhs, rhs);
234 Reduction const reduction = ReduceInt32Add(node);
235 return reduction.Changed() ? reduction.replacement() : node;
236}
237
238Node* MachineOperatorReducer::Int32Sub(Node* lhs, Node* rhs) {
239 Node* const node = graph()->NewNode(machine()->Int32Sub(), lhs, rhs);
240 Reduction const reduction = ReduceInt32Sub(node);
241 return reduction.Changed() ? reduction.replacement() : node;
242}
243
244Node* MachineOperatorReducer::Int32Mul(Node* lhs, Node* rhs) {
245 return graph()->NewNode(machine()->Int32Mul(), lhs, rhs);
246}
247
248Node* MachineOperatorReducer::Int32Div(Node* dividend, int32_t divisor) {
249 DCHECK_NE(0, divisor);
250 DCHECK_NE(std::numeric_limits<int32_t>::min(), divisor);
251 base::MagicNumbersForDivision<uint32_t> const mag =
252 base::SignedDivisionByConstant(bit_cast<uint32_t>(divisor));
253 Node* quotient = graph()->NewNode(machine()->Int32MulHigh(), dividend,
254 Uint32Constant(mag.multiplier));
255 if (divisor > 0 && bit_cast<int32_t>(mag.multiplier) < 0) {
256 quotient = Int32Add(quotient, dividend);
257 } else if (divisor < 0 && bit_cast<int32_t>(mag.multiplier) > 0) {
258 quotient = Int32Sub(quotient, dividend);
259 }
260 return Int32Add(Word32Sar(quotient, mag.shift), Word32Shr(dividend, 31));
261}
262
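For readers unfamiliar with magic-number division, this is the scalar computation that the node graph built by Int32Div mirrors, written out directly. It is only a reading aid; the helper name is made up and the magic constants are assumed to come from base::SignedDivisionByConstant as above:

#include <cstdint>

// Scalar mirror of the graph above (requires divisor != 0 and
// divisor != INT32_MIN, as the DCHECKs state). Assumes >> on a negative
// int32_t is an arithmetic shift, as on the targets V8 supports.
int32_t Int32DivByConstant(int32_t dividend, int32_t divisor,
                           uint32_t multiplier, unsigned shift) {
  // Int32MulHigh: high 32 bits of the signed 32x32 -> 64-bit product.
  int32_t quotient = static_cast<int32_t>(
      (static_cast<int64_t>(dividend) * static_cast<int32_t>(multiplier)) >> 32);
  if (divisor > 0 && static_cast<int32_t>(multiplier) < 0) {
    quotient += dividend;
  } else if (divisor < 0 && static_cast<int32_t>(multiplier) > 0) {
    quotient -= dividend;
  }
  // Word32Sar(quotient, shift) plus Word32Shr(dividend, 31): adds 1 for a
  // negative dividend so the result rounds toward zero.
  return (quotient >> shift) +
         static_cast<int32_t>(static_cast<uint32_t>(dividend) >> 31);
}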
263Node* MachineOperatorReducer::Uint32Div(Node* dividend, uint32_t divisor) {
264 DCHECK_LT(0u, divisor);
265 // If the divisor is even, we can avoid using the expensive fixup by shifting
266 // the dividend upfront.
267 unsigned const shift = base::bits::CountTrailingZeros(divisor);
Step 1: Calling 'CountTrailingZeros<unsigned int, 32U>'
Step 5: Returning from 'CountTrailingZeros<unsigned int, 32U>'
Step 6: 'shift' initialized to 32
268 dividend = Word32Shr(dividend, shift);
269 divisor >>= shift;
Step 7: Assigned value is garbage or undefined
270 // Compute the magic number for the (shifted) divisor.
271 base::MagicNumbersForDivision<uint32_t> const mag =
272 base::UnsignedDivisionByConstant(divisor, shift);
273 Node* quotient = graph()->NewNode(machine()->Uint32MulHigh(), dividend,
274 Uint32Constant(mag.multiplier));
275 if (mag.add) {
276 DCHECK_LE(1u, mag.shift);
277 quotient = Word32Shr(
278 Int32Add(Word32Shr(Int32Sub(dividend, quotient), 1), quotient),
279 mag.shift - 1);
280 } else {
281 quotient = Word32Shr(quotient, mag.shift);
282 }
283 return quotient;
284}
285
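Likewise, the scalar computation mirrored by the Uint32Div graph, assuming multiplier, shift and add come from base::UnsignedDivisionByConstant for the pre-shifted divisor (the helper name is made up):

#include <cstdint>

// Scalar mirror of the graph above. pre_shift strips the divisor's trailing
// zero factors, which the comment above notes avoids the expensive fixup
// for even divisors.
uint32_t Uint32DivByConstant(uint32_t dividend, uint32_t multiplier,
                             unsigned shift, bool add, unsigned pre_shift) {
  dividend >>= pre_shift;  // Word32Shr(dividend, shift)
  // Uint32MulHigh: high 32 bits of the unsigned 32x32 -> 64-bit product.
  uint32_t quotient = static_cast<uint32_t>(
      (static_cast<uint64_t>(dividend) * multiplier) >> 32);
  if (add) {
    // Fixup path when the magic multiplier does not fit in 32 bits.
    quotient = (((dividend - quotient) >> 1) + quotient) >> (shift - 1);
  } else {
    quotient >>= shift;
  }
  return quotient;
}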
286Node* MachineOperatorReducer::TruncateInt64ToInt32(Node* value) {
287 Node* const node = graph()->NewNode(machine()->TruncateInt64ToInt32(), value);
288 Reduction const reduction = ReduceTruncateInt64ToInt32(node);
289 return reduction.Changed() ? reduction.replacement() : node;
290}
291
292namespace {
293bool ObjectsMayAlias(Node* a, Node* b) {
294 if (a != b) {
295 if (NodeProperties::IsFreshObject(b)) std::swap(a, b);
296 if (NodeProperties::IsFreshObject(a) &&
297 (NodeProperties::IsFreshObject(b) ||
298 b->opcode() == IrOpcode::kParameter ||
299 b->opcode() == IrOpcode::kLoadImmutable ||
300 IrOpcode::IsConstantOpcode(b->opcode()))) {
301 return false;
302 }
303 }
304 return true;
305}
306} // namespace
307
308// Perform constant folding and strength reduction on machine operators.
309Reduction MachineOperatorReducer::Reduce(Node* node) {
310 switch (node->opcode()) {
311 case IrOpcode::kProjection:
312 return ReduceProjection(ProjectionIndexOf(node->op()), node->InputAt(0));
313 case IrOpcode::kWord32And:
314 return ReduceWord32And(node);
315 case IrOpcode::kWord64And:
316 return ReduceWord64And(node);
317 case IrOpcode::kWord32Or:
318 return ReduceWord32Or(node);
319 case IrOpcode::kWord64Or:
320 return ReduceWord64Or(node);
321 case IrOpcode::kWord32Xor:
322 return ReduceWord32Xor(node);
323 case IrOpcode::kWord64Xor:
324 return ReduceWord64Xor(node);
325 case IrOpcode::kWord32Shl:
326 return ReduceWord32Shl(node);
327 case IrOpcode::kWord64Shl:
328 return ReduceWord64Shl(node);
329 case IrOpcode::kWord32Shr:
330 return ReduceWord32Shr(node);
331 case IrOpcode::kWord64Shr:
332 return ReduceWord64Shr(node);
333 case IrOpcode::kWord32Sar:
334 return ReduceWord32Sar(node);
335 case IrOpcode::kWord64Sar:
336 return ReduceWord64Sar(node);
337 case IrOpcode::kWord32Ror: {
338 Int32BinopMatcher m(node);
339 if (m.right().Is(0)) return Replace(m.left().node()); // x ror 0 => x
340 if (m.IsFoldable()) { // K ror K => K (K stands for arbitrary constants)
341 return ReplaceInt32(base::bits::RotateRight32(
342 m.left().ResolvedValue(), m.right().ResolvedValue() & 31));
343 }
344 break;
345 }
346 case IrOpcode::kWord32Equal: {
347 return ReduceWord32Equal(node);
348 }
349 case IrOpcode::kWord64Equal: {
350 Int64BinopMatcher m(node);
351 if (m.IsFoldable()) { // K == K => K (K stands for arbitrary constants)
352 return ReplaceBool(m.left().ResolvedValue() ==
353 m.right().ResolvedValue());
354 }
355 if (m.left().IsInt64Sub() && m.right().Is(0)) { // x - y == 0 => x == y
356 Int64BinopMatcher msub(m.left().node());
357 node->ReplaceInput(0, msub.left().node());
358 node->ReplaceInput(1, msub.right().node());
359 return Changed(node);
360 }
361 // TODO(turbofan): fold HeapConstant, ExternalReference, pointer compares
362 if (m.LeftEqualsRight()) return ReplaceBool(true); // x == x => true
363 // This is a workaround for not having escape analysis for wasm
364 // (machine-level) turbofan graphs.
365 if (!ObjectsMayAlias(m.left().node(), m.right().node())) {
366 return ReplaceBool(false);
367 }
368 break;
369 }
370 case IrOpcode::kInt32Add:
371 return ReduceInt32Add(node);
372 case IrOpcode::kInt64Add:
373 return ReduceInt64Add(node);
374 case IrOpcode::kInt32Sub:
375 return ReduceInt32Sub(node);
376 case IrOpcode::kInt64Sub:
377 return ReduceInt64Sub(node);
378 case IrOpcode::kInt32Mul: {
379 Int32BinopMatcher m(node);
380 if (m.right().Is(0)) return Replace(m.right().node()); // x * 0 => 0
381 if (m.right().Is(1)) return Replace(m.left().node()); // x * 1 => x
382 if (m.IsFoldable()) { // K * K => K (K stands for arbitrary constants)
383 return ReplaceInt32(base::MulWithWraparound(m.left().ResolvedValue(),
384 m.right().ResolvedValue()));
385 }
386 if (m.right().Is(-1)) { // x * -1 => 0 - x
387 node->ReplaceInput(0, Int32Constant(0));
388 node->ReplaceInput(1, m.left().node());
389 NodeProperties::ChangeOp(node, machine()->Int32Sub());
390 return Changed(node);
391 }
392 if (m.right().IsPowerOf2()) { // x * 2^n => x << n
393 node->ReplaceInput(1, Int32Constant(base::bits::WhichPowerOfTwo(
394 m.right().ResolvedValue())));
395 NodeProperties::ChangeOp(node, machine()->Word32Shl());
396 return Changed(node).FollowedBy(ReduceWord32Shl(node));
397 }
398 // (x * Int32Constant(a)) * Int32Constant(b) => x * Int32Constant(a * b)
399 if (m.right().HasResolvedValue() && m.left().IsInt32Mul()) {
400 Int32BinopMatcher n(m.left().node());
401 if (n.right().HasResolvedValue() && m.OwnsInput(m.left().node())) {
402 node->ReplaceInput(
403 1, Int32Constant(base::MulWithWraparound(
404 m.right().ResolvedValue(), n.right().ResolvedValue())));
405 node->ReplaceInput(0, n.left().node());
406 return Changed(node);
407 }
408 }
409 break;
410 }
411 case IrOpcode::kInt32MulWithOverflow: {
412 Int32BinopMatcher m(node);
413 if (m.right().Is(2)) {
414 node->ReplaceInput(1, m.left().node());
415 NodeProperties::ChangeOp(node, machine()->Int32AddWithOverflow());
416 return Changed(node);
417 }
418 if (m.right().Is(-1)) {
419 node->ReplaceInput(0, Int32Constant(0));
420 node->ReplaceInput(1, m.left().node());
421 NodeProperties::ChangeOp(node, machine()->Int32SubWithOverflow());
422 return Changed(node);
423 }
424 break;
425 }
426 case IrOpcode::kInt64Mul:
427 return ReduceInt64Mul(node);
428 case IrOpcode::kInt32Div:
429 return ReduceInt32Div(node);
430 case IrOpcode::kUint32Div:
431 return ReduceUint32Div(node);
432 case IrOpcode::kInt32Mod:
433 return ReduceInt32Mod(node);
434 case IrOpcode::kUint32Mod:
435 return ReduceUint32Mod(node);
436 case IrOpcode::kInt32LessThan: {
437 Int32BinopMatcher m(node);
438 if (m.IsFoldable()) { // K < K => K (K stands for arbitrary constants)
439 return ReplaceBool(m.left().ResolvedValue() <
440 m.right().ResolvedValue());
441 }
442 if (m.LeftEqualsRight()) return ReplaceBool(false); // x < x => false
443 if (m.left().IsWord32Or() && m.right().Is(0)) {
444 // (x | K) < 0 => true or (K | x) < 0 => true iff K < 0
445 Int32BinopMatcher mleftmatcher(m.left().node());
446 if (mleftmatcher.left().IsNegative() ||
447 mleftmatcher.right().IsNegative()) {
448 return ReplaceBool(true);
449 }
450 }
451 return ReduceWord32Comparisons(node);
452 }
453 case IrOpcode::kInt32LessThanOrEqual: {
454 Int32BinopMatcher m(node);
455 if (m.IsFoldable()) { // K <= K => K (K stands for arbitrary constants)
456 return ReplaceBool(m.left().ResolvedValue() <=
457 m.right().ResolvedValue());
458 }
459 if (m.LeftEqualsRight()) return ReplaceBool(true); // x <= x => true
460 return ReduceWord32Comparisons(node);
461 }
462 case IrOpcode::kUint32LessThan: {
463 Uint32BinopMatcher m(node);
464 if (m.left().Is(kMaxUInt32)) return ReplaceBool(false); // M < x => false
465 if (m.right().Is(0)) return ReplaceBool(false); // x < 0 => false
466 if (m.IsFoldable()) { // K < K => K (K stands for arbitrary constants)
467 return ReplaceBool(m.left().ResolvedValue() <
468 m.right().ResolvedValue());
469 }
470 if (m.LeftEqualsRight()) return ReplaceBool(false); // x < x => false
471 if (m.left().IsWord32Sar() && m.right().HasResolvedValue()) {
472 Int32BinopMatcher mleft(m.left().node());
473 if (mleft.right().HasResolvedValue()) {
474 // (x >> K) < C => x < (C << K)
475 // when C < (M >> K)
476 const uint32_t c = m.right().ResolvedValue();
477 const uint32_t k = mleft.right().ResolvedValue() & 0x1F;
478 if (c < static_cast<uint32_t>(kMaxInt >> k)) {
479 node->ReplaceInput(0, mleft.left().node());
480 node->ReplaceInput(1, Uint32Constant(c << k));
481 return Changed(node);
482 }
483 // TODO(turbofan): else the comparison is always true.
484 }
485 }
486 return ReduceWord32Comparisons(node);
487 }
488 case IrOpcode::kUint32LessThanOrEqual: {
489 Uint32BinopMatcher m(node);
490 if (m.left().Is(0)) return ReplaceBool(true); // 0 <= x => true
491 if (m.right().Is(kMaxUInt32)) return ReplaceBool(true); // x <= M => true
492 if (m.IsFoldable()) { // K <= K => K (K stands for arbitrary constants)
493 return ReplaceBool(m.left().ResolvedValue() <=
494 m.right().ResolvedValue());
495 }
496 if (m.LeftEqualsRight()) return ReplaceBool(true); // x <= x => true
497 return ReduceWord32Comparisons(node);
498 }
499 case IrOpcode::kFloat32Sub: {
500 Float32BinopMatcher m(node);
501 if (allow_signalling_nan_ && m.right().Is(0) &&
502 (std::copysign(1.0, m.right().ResolvedValue()) > 0)) {
503 return Replace(m.left().node()); // x - 0 => x
504 }
505 if (m.right().IsNaN()) { // x - NaN => NaN
506 return ReplaceFloat32(SilenceNaN(m.right().ResolvedValue()));
507 }
508 if (m.left().IsNaN()) { // NaN - x => NaN
509 return ReplaceFloat32(SilenceNaN(m.left().ResolvedValue()));
510 }
511 if (m.IsFoldable()) { // L - R => (L - R)
512 return ReplaceFloat32(m.left().ResolvedValue() -
513 m.right().ResolvedValue());
514 }
515 if (allow_signalling_nan_ && m.left().IsMinusZero()) {
516 // -0.0 - round_down(-0.0 - R) => round_up(R)
517 if (machine()->Float32RoundUp().IsSupported() &&
518 m.right().IsFloat32RoundDown()) {
519 if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat32Sub) {
520 Float32BinopMatcher mright0(m.right().InputAt(0));
521 if (mright0.left().IsMinusZero()) {
522 return Replace(graph()->NewNode(machine()->Float32RoundUp().op(),
523 mright0.right().node()));
524 }
525 }
526 }
527 // -0.0 - R => -R
528 node->RemoveInput(0);
529 NodeProperties::ChangeOp(node, machine()->Float32Neg());
530 return Changed(node);
531 }
532 break;
533 }
534 case IrOpcode::kFloat64Add: {
535 Float64BinopMatcher m(node);
536 if (m.right().IsNaN()) { // x + NaN => NaN
537 return ReplaceFloat64(SilenceNaN(m.right().ResolvedValue()));
538 }
539 if (m.left().IsNaN()) { // NaN + x => NaN
540 return ReplaceFloat64(SilenceNaN(m.left().ResolvedValue()));
541 }
542 if (m.IsFoldable()) { // K + K => K (K stands for arbitrary constants)
543 return ReplaceFloat64(m.left().ResolvedValue() +
544 m.right().ResolvedValue());
545 }
546 break;
547 }
548 case IrOpcode::kFloat64Sub: {
549 Float64BinopMatcher m(node);
550 if (allow_signalling_nan_ && m.right().Is(0) &&
551 (base::Double(m.right().ResolvedValue()).Sign() > 0)) {
552 return Replace(m.left().node()); // x - 0 => x
553 }
554 if (m.right().IsNaN()) { // x - NaN => NaN
555 return ReplaceFloat64(SilenceNaN(m.right().ResolvedValue()));
556 }
557 if (m.left().IsNaN()) { // NaN - x => NaN
558 return ReplaceFloat64(SilenceNaN(m.left().ResolvedValue()));
559 }
560 if (m.IsFoldable()) { // L - R => (L - R)
561 return ReplaceFloat64(m.left().ResolvedValue() -
562 m.right().ResolvedValue());
563 }
564 if (allow_signalling_nan_ && m.left().IsMinusZero()) {
565 // -0.0 - round_down(-0.0 - R) => round_up(R)
566 if (machine()->Float64RoundUp().IsSupported() &&
567 m.right().IsFloat64RoundDown()) {
568 if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub) {
569 Float64BinopMatcher mright0(m.right().InputAt(0));
570 if (mright0.left().IsMinusZero()) {
571 return Replace(graph()->NewNode(machine()->Float64RoundUp().op(),
572 mright0.right().node()));
573 }
574 }
575 }
576 // -0.0 - R => -R
577 node->RemoveInput(0);
578 NodeProperties::ChangeOp(node, machine()->Float64Neg());
579 return Changed(node);
580 }
581 break;
582 }
583 case IrOpcode::kFloat64Mul: {
584 Float64BinopMatcher m(node);
585 if (allow_signalling_nan_ && m.right().Is(1))
586 return Replace(m.left().node()); // x * 1.0 => x
587 if (m.right().Is(-1)) { // x * -1.0 => -0.0 - x
588 node->ReplaceInput(0, Float64Constant(-0.0));
589 node->ReplaceInput(1, m.left().node());
590 NodeProperties::ChangeOp(node, machine()->Float64Sub());
591 return Changed(node);
592 }
593 if (m.right().IsNaN()) { // x * NaN => NaN
594 return ReplaceFloat64(SilenceNaN(m.right().ResolvedValue()));
595 }
596 if (m.IsFoldable()) { // K * K => K (K stands for arbitrary constants)
597 return ReplaceFloat64(m.left().ResolvedValue() *
598 m.right().ResolvedValue());
599 }
600 if (m.right().Is(2)) { // x * 2.0 => x + x
601 node->ReplaceInput(1, m.left().node());
602 NodeProperties::ChangeOp(node, machine()->Float64Add());
603 return Changed(node);
604 }
605 break;
606 }
607 case IrOpcode::kFloat64Div: {
608 Float64BinopMatcher m(node);
609 if (allow_signalling_nan_ && m.right().Is(1))
610 return Replace(m.left().node()); // x / 1.0 => x
611 // TODO(ahaas): We could do x / 1.0 = x if we knew that x is not an sNaN.
612 if (m.right().IsNaN()) { // x / NaN => NaN
613 return ReplaceFloat64(SilenceNaN(m.right().ResolvedValue()));
614 }
615 if (m.left().IsNaN()) { // NaN / x => NaN
616 return ReplaceFloat64(SilenceNaN(m.left().ResolvedValue()));
617 }
618 if (m.IsFoldable()) { // K / K => K (K stands for arbitrary constants)
619 return ReplaceFloat64(
620 base::Divide(m.left().ResolvedValue(), m.right().ResolvedValue()));
621 }
622 if (allow_signalling_nan_ && m.right().Is(-1)) { // x / -1.0 => -x
623 node->RemoveInput(1);
624 NodeProperties::ChangeOp(node, machine()->Float64Neg());
625 return Changed(node);
626 }
627 if (m.right().IsNormal() && m.right().IsPositiveOrNegativePowerOf2()) {
628 // All reciprocals of non-denormal powers of two can be represented
629 // exactly, so division by power of two can be reduced to
630 // multiplication by reciprocal, with the same result.
631 node->ReplaceInput(1, Float64Constant(1.0 / m.right().ResolvedValue()));
632 NodeProperties::ChangeOp(node, machine()->Float64Mul());
633 return Changed(node);
634 }
635 break;
636 }
637 case IrOpcode::kFloat64Mod: {
638 Float64BinopMatcher m(node);
639 if (m.right().Is(0)) { // x % 0 => NaN
640 return ReplaceFloat64(std::numeric_limits<double>::quiet_NaN());
641 }
642 if (m.right().IsNaN()) { // x % NaN => NaN
643 return ReplaceFloat64(SilenceNaN(m.right().ResolvedValue()));
644 }
645 if (m.left().IsNaN()) { // NaN % x => NaN
646 return ReplaceFloat64(SilenceNaN(m.left().ResolvedValue()));
647 }
648 if (m.IsFoldable()) { // K % K => K (K stands for arbitrary constants)
649 return ReplaceFloat64(
650 Modulo(m.left().ResolvedValue(), m.right().ResolvedValue()));
651 }
652 break;
653 }
654 case IrOpcode::kFloat64Acos: {
655 Float64Matcher m(node->InputAt(0));
656 if (m.HasResolvedValue())
657 return ReplaceFloat64(base::ieee754::acos(m.ResolvedValue()));
658 break;
659 }
660 case IrOpcode::kFloat64Acosh: {
661 Float64Matcher m(node->InputAt(0));
662 if (m.HasResolvedValue())
663 return ReplaceFloat64(base::ieee754::acosh(m.ResolvedValue()));
664 break;
665 }
666 case IrOpcode::kFloat64Asin: {
667 Float64Matcher m(node->InputAt(0));
668 if (m.HasResolvedValue())
669 return ReplaceFloat64(base::ieee754::asin(m.ResolvedValue()));
670 break;
671 }
672 case IrOpcode::kFloat64Asinh: {
673 Float64Matcher m(node->InputAt(0));
674 if (m.HasResolvedValue())
675 return ReplaceFloat64(base::ieee754::asinh(m.ResolvedValue()));
676 break;
677 }
678 case IrOpcode::kFloat64Atan: {
679 Float64Matcher m(node->InputAt(0));
680 if (m.HasResolvedValue())
681 return ReplaceFloat64(base::ieee754::atan(m.ResolvedValue()));
682 break;
683 }
684 case IrOpcode::kFloat64Atanh: {
685 Float64Matcher m(node->InputAt(0));
686 if (m.HasResolvedValue())
687 return ReplaceFloat64(base::ieee754::atanh(m.ResolvedValue()));
688 break;
689 }
690 case IrOpcode::kFloat64Atan2: {
691 Float64BinopMatcher m(node);
692 if (m.right().IsNaN()) {
693 return ReplaceFloat64(SilenceNaN(m.right().ResolvedValue()));
694 }
695 if (m.left().IsNaN()) {
696 return ReplaceFloat64(SilenceNaN(m.left().ResolvedValue()));
697 }
698 if (m.IsFoldable()) {
699 return ReplaceFloat64(base::ieee754::atan2(m.left().ResolvedValue(),
700 m.right().ResolvedValue()));
701 }
702 break;
703 }
704 case IrOpcode::kFloat64Cbrt: {
705 Float64Matcher m(node->InputAt(0));
706 if (m.HasResolvedValue())
707 return ReplaceFloat64(base::ieee754::cbrt(m.ResolvedValue()));
708 break;
709 }
710 case IrOpcode::kFloat64Cos: {
711 Float64Matcher m(node->InputAt(0));
712 if (m.HasResolvedValue())
713 return ReplaceFloat64(base::ieee754::cos(m.ResolvedValue()));
714 break;
715 }
716 case IrOpcode::kFloat64Cosh: {
717 Float64Matcher m(node->InputAt(0));
718 if (m.HasResolvedValue())
719 return ReplaceFloat64(base::ieee754::cosh(m.ResolvedValue()));
720 break;
721 }
722 case IrOpcode::kFloat64Exp: {
723 Float64Matcher m(node->InputAt(0));
724 if (m.HasResolvedValue())
725 return ReplaceFloat64(base::ieee754::exp(m.ResolvedValue()));
726 break;
727 }
728 case IrOpcode::kFloat64Expm1: {
729 Float64Matcher m(node->InputAt(0));
730 if (m.HasResolvedValue())
731 return ReplaceFloat64(base::ieee754::expm1(m.ResolvedValue()));
732 break;
733 }
734 case IrOpcode::kFloat64Log: {
735 Float64Matcher m(node->InputAt(0));
736 if (m.HasResolvedValue())
737 return ReplaceFloat64(base::ieee754::log(m.ResolvedValue()));
738 break;
739 }
740 case IrOpcode::kFloat64Log1p: {
741 Float64Matcher m(node->InputAt(0));
742 if (m.HasResolvedValue())
743 return ReplaceFloat64(base::ieee754::log1p(m.ResolvedValue()));
744 break;
745 }
746 case IrOpcode::kFloat64Log10: {
747 Float64Matcher m(node->InputAt(0));
748 if (m.HasResolvedValue())
749 return ReplaceFloat64(base::ieee754::log10(m.ResolvedValue()));
750 break;
751 }
752 case IrOpcode::kFloat64Log2: {
753 Float64Matcher m(node->InputAt(0));
754 if (m.HasResolvedValue())
755 return ReplaceFloat64(base::ieee754::log2(m.ResolvedValue()));
756 break;
757 }
758 case IrOpcode::kFloat64Pow: {
759 Float64BinopMatcher m(node);
760 if (m.IsFoldable()) {
761 return ReplaceFloat64(base::ieee754::pow(m.left().ResolvedValue(),
762 m.right().ResolvedValue()));
763 } else if (m.right().Is(0.0)) { // x ** +-0.0 => 1.0
764 return ReplaceFloat64(1.0);
765 } else if (m.right().Is(2.0)) { // x ** 2.0 => x * x
766 node->ReplaceInput(1, m.left().node());
767 NodeProperties::ChangeOp(node, machine()->Float64Mul());
768 return Changed(node);
769 } else if (m.right().Is(0.5)) {
770 // x ** 0.5 => if x <= -Infinity then Infinity else sqrt(0.0 + x)
771 return Replace(Float64PowHalf(m.left().node()));
772 }
773 break;
774 }
775 case IrOpcode::kFloat64Sin: {
776 Float64Matcher m(node->InputAt(0));
777 if (m.HasResolvedValue())
778 return ReplaceFloat64(base::ieee754::sin(m.ResolvedValue()));
779 break;
780 }
781 case IrOpcode::kFloat64Sinh: {
782 Float64Matcher m(node->InputAt(0));
783 if (m.HasResolvedValue())
784 return ReplaceFloat64(base::ieee754::sinh(m.ResolvedValue()));
785 break;
786 }
787 case IrOpcode::kFloat64Tan: {
788 Float64Matcher m(node->InputAt(0));
789 if (m.HasResolvedValue())
790 return ReplaceFloat64(base::ieee754::tan(m.ResolvedValue()));
791 break;
792 }
793 case IrOpcode::kFloat64Tanh: {
794 Float64Matcher m(node->InputAt(0));
795 if (m.HasResolvedValue())
796 return ReplaceFloat64(base::ieee754::tanh(m.ResolvedValue()));
797 break;
798 }
799 case IrOpcode::kChangeFloat32ToFloat64: {
800 Float32Matcher m(node->InputAt(0));
801 if (m.HasResolvedValue()) {
802 if (!allow_signalling_nan_ && std::isnan(m.ResolvedValue())) {
803 return ReplaceFloat64(SilenceNaN(m.ResolvedValue()));
804 }
805 return ReplaceFloat64(m.ResolvedValue());
806 }
807 break;
808 }
809 case IrOpcode::kChangeFloat64ToInt32: {
810 Float64Matcher m(node->InputAt(0));
811 if (m.HasResolvedValue())
812 return ReplaceInt32(FastD2IChecked(m.ResolvedValue()));
813 if (m.IsChangeInt32ToFloat64()) return Replace(m.node()->InputAt(0));
814 break;
815 }
816 case IrOpcode::kChangeFloat64ToInt64: {
817 Float64Matcher m(node->InputAt(0));
818 if (m.HasResolvedValue())
819 return ReplaceInt64(static_cast<int64_t>(m.ResolvedValue()));
820 if (m.IsChangeInt64ToFloat64()) return Replace(m.node()->InputAt(0));
821 break;
822 }
823 case IrOpcode::kChangeFloat64ToUint32: {
824 Float64Matcher m(node->InputAt(0));
825 if (m.HasResolvedValue())
826 return ReplaceInt32(FastD2UI(m.ResolvedValue()));
827 if (m.IsChangeUint32ToFloat64()) return Replace(m.node()->InputAt(0));
828 break;
829 }
830 case IrOpcode::kChangeInt32ToFloat64: {
831 Int32Matcher m(node->InputAt(0));
832 if (m.HasResolvedValue())
833 return ReplaceFloat64(FastI2D(m.ResolvedValue()));
834 break;
835 }
836 case IrOpcode::kBitcastWord32ToWord64: {
837 Int32Matcher m(node->InputAt(0));
838 if (m.HasResolvedValue()) return ReplaceInt64(m.ResolvedValue());
839 break;
840 }
841 case IrOpcode::kChangeInt32ToInt64: {
842 Int32Matcher m(node->InputAt(0));
843 if (m.HasResolvedValue()) return ReplaceInt64(m.ResolvedValue());
844 break;
845 }
846 case IrOpcode::kChangeInt64ToFloat64: {
847 Int64Matcher m(node->InputAt(0));
848 if (m.HasResolvedValue())
849 return ReplaceFloat64(static_cast<double>(m.ResolvedValue()));
850 if (m.IsChangeFloat64ToInt64()) return Replace(m.node()->InputAt(0));
851 break;
852 }
853 case IrOpcode::kChangeUint32ToFloat64: {
854 Uint32Matcher m(node->InputAt(0));
855 if (m.HasResolvedValue())
856 return ReplaceFloat64(FastUI2D(m.ResolvedValue()));
857 break;
858 }
859 case IrOpcode::kChangeUint32ToUint64: {
860 Uint32Matcher m(node->InputAt(0));
861 if (m.HasResolvedValue())
862 return ReplaceInt64(static_cast<uint64_t>(m.ResolvedValue()));
863 break;
864 }
865 case IrOpcode::kTruncateFloat64ToWord32: {
866 Float64Matcher m(node->InputAt(0));
867 if (m.HasResolvedValue())
868 return ReplaceInt32(DoubleToInt32(m.ResolvedValue()));
869 if (m.IsChangeInt32ToFloat64()) return Replace(m.node()->InputAt(0));
870 return NoChange();
871 }
872 case IrOpcode::kTruncateInt64ToInt32:
873 return ReduceTruncateInt64ToInt32(node);
874 case IrOpcode::kTruncateFloat64ToFloat32: {
875 Float64Matcher m(node->InputAt(0));
876 if (m.HasResolvedValue()) {
877 if (!allow_signalling_nan_ && m.IsNaN()) {
878 return ReplaceFloat32(DoubleToFloat32(SilenceNaN(m.ResolvedValue())));
879 }
880 return ReplaceFloat32(DoubleToFloat32(m.ResolvedValue()));
881 }
882 if (allow_signalling_nan_ && m.IsChangeFloat32ToFloat64())
883 return Replace(m.node()->InputAt(0));
884 break;
885 }
886 case IrOpcode::kRoundFloat64ToInt32: {
887 Float64Matcher m(node->InputAt(0));
888 if (m.HasResolvedValue()) {
889 return ReplaceInt32(DoubleToInt32(m.ResolvedValue()));
890 }
891 if (m.IsChangeInt32ToFloat64()) return Replace(m.node()->InputAt(0));
892 break;
893 }
894 case IrOpcode::kFloat64InsertLowWord32:
895 return ReduceFloat64InsertLowWord32(node);
896 case IrOpcode::kFloat64InsertHighWord32:
897 return ReduceFloat64InsertHighWord32(node);
898 case IrOpcode::kStore:
899 case IrOpcode::kUnalignedStore:
900 return ReduceStore(node);
901 case IrOpcode::kFloat64Equal:
902 case IrOpcode::kFloat64LessThan:
903 case IrOpcode::kFloat64LessThanOrEqual:
904 return ReduceFloat64Compare(node);
905 case IrOpcode::kFloat64RoundDown:
906 return ReduceFloat64RoundDown(node);
907 case IrOpcode::kBitcastTaggedToWord:
908 case IrOpcode::kBitcastTaggedToWordForTagAndSmiBits: {
909 NodeMatcher m(node->InputAt(0));
910 if (m.IsBitcastWordToTaggedSigned()) {
911 RelaxEffectsAndControls(node);
912 return Replace(m.InputAt(0));
913 }
914 break;
915 }
916 case IrOpcode::kBranch:
917 case IrOpcode::kDeoptimizeIf:
918 case IrOpcode::kDeoptimizeUnless:
919 case IrOpcode::kTrapIf:
920 case IrOpcode::kTrapUnless:
921 return ReduceConditional(node);
922 case IrOpcode::kInt64LessThan: {
923 Int64BinopMatcher m(node);
924 if (m.IsFoldable()) { // K < K => K (K stands for arbitrary constants)
925 return ReplaceBool(m.left().ResolvedValue() <
926 m.right().ResolvedValue());
927 }
928 return ReduceWord64Comparisons(node);
929 }
930 case IrOpcode::kInt64LessThanOrEqual: {
931 Int64BinopMatcher m(node);
932 if (m.IsFoldable()) { // K <= K => K (K stands for arbitrary constants)
933 return ReplaceBool(m.left().ResolvedValue() <=
934 m.right().ResolvedValue());
935 }
936 return ReduceWord64Comparisons(node);
937 }
938 case IrOpcode::kUint64LessThan: {
939 Uint64BinopMatcher m(node);
940 if (m.IsFoldable()) { // K < K => K (K stands for arbitrary constants)
941 return ReplaceBool(m.left().ResolvedValue() <
942 m.right().ResolvedValue());
943 }
944 return ReduceWord64Comparisons(node);
945 }
946 case IrOpcode::kUint64LessThanOrEqual: {
947 Uint64BinopMatcher m(node);
948 if (m.IsFoldable()) { // K <= K => K (K stands for arbitrary constants)
949 return ReplaceBool(m.left().ResolvedValue() <=
950 m.right().ResolvedValue());
951 }
952 return ReduceWord64Comparisons(node);
953 }
954 case IrOpcode::kFloat32Select:
955 case IrOpcode::kFloat64Select:
956 case IrOpcode::kWord32Select:
957 case IrOpcode::kWord64Select: {
958 Int32Matcher match(node->InputAt(0));
959 if (match.HasResolvedValue()) {
960 if (match.Is(0)) {
961 return Replace(node->InputAt(2));
962 } else {
963 return Replace(node->InputAt(1));
964 }
965 }
966 break;
967 }
968 default:
969 break;
970 }
971 return NoChange();
972}
973
974Reduction MachineOperatorReducer::ReduceTruncateInt64ToInt32(Node* node) {
975 Int64Matcher m(node->InputAt(0));
976 if (m.HasResolvedValue())
977 return ReplaceInt32(static_cast<int32_t>(m.ResolvedValue()));
978 if (m.IsChangeInt32ToInt64()) return Replace(m.node()->InputAt(0));
979 return NoChange();
980}
981
982Reduction MachineOperatorReducer::ReduceInt32Add(Node* node) {
983 DCHECK_EQ(IrOpcode::kInt32Add, node->opcode());
984 Int32BinopMatcher m(node);
985 if (m.right().Is(0)) return Replace(m.left().node()); // x + 0 => x
986 if (m.IsFoldable()) { // K + K => K (K stands for arbitrary constants)
987 return ReplaceInt32(base::AddWithWraparound(m.left().ResolvedValue(),
988 m.right().ResolvedValue()));
989 }
990 if (m.left().IsInt32Sub()) {
991 Int32BinopMatcher mleft(m.left().node());
992 if (mleft.left().Is(0)) { // (0 - x) + y => y - x
993 node->ReplaceInput(0, m.right().node());
994 node->ReplaceInput(1, mleft.right().node());
995 NodeProperties::ChangeOp(node, machine()->Int32Sub());
996 return Changed(node).FollowedBy(ReduceInt32Sub(node));
997 }
998 }
999 if (m.right().IsInt32Sub()) {
1000 Int32BinopMatcher mright(m.right().node());
1001 if (mright.left().Is(0)) { // y + (0 - x) => y - x
1002 node->ReplaceInput(1, mright.right().node());
1003 NodeProperties::ChangeOp(node, machine()->Int32Sub());
1004 return Changed(node).FollowedBy(ReduceInt32Sub(node));
1005 }
1006 }
1007 // (x + Int32Constant(a)) + Int32Constant(b) => x + Int32Constant(a + b)
1008 if (m.right().HasResolvedValue() && m.left().IsInt32Add()) {
1009 Int32BinopMatcher n(m.left().node());
1010 if (n.right().HasResolvedValue() && m.OwnsInput(m.left().node())) {
1011 node->ReplaceInput(
1012 1, Int32Constant(base::AddWithWraparound(m.right().ResolvedValue(),
1013 n.right().ResolvedValue())));
1014 node->ReplaceInput(0, n.left().node());
1015 return Changed(node);
1016 }
1017 }
1018
1019 return NoChange();
1020}
1021
1022Reduction MachineOperatorReducer::ReduceInt64Add(Node* node) {
1023 DCHECK_EQ(IrOpcode::kInt64Add, node->opcode());
1024 Int64BinopMatcher m(node);
1025 if (m.right().Is(0)) return Replace(m.left().node()); // x + 0 => x
1026 if (m.IsFoldable()) {
1027 return ReplaceInt64(base::AddWithWraparound(m.left().ResolvedValue(),
1028 m.right().ResolvedValue()));
1029 }
1030 // (x + Int64Constant(a)) + Int64Constant(b) => x + Int64Constant(a + b)
1031 if (m.right().HasResolvedValue() && m.left().IsInt64Add()) {
1032 Int64BinopMatcher n(m.left().node());
1033 if (n.right().HasResolvedValue() && m.OwnsInput(m.left().node())) {
1034 node->ReplaceInput(
1035 1, Int64Constant(base::AddWithWraparound(m.right().ResolvedValue(),
1036 n.right().ResolvedValue())));
1037 node->ReplaceInput(0, n.left().node());
1038 return Changed(node);
1039 }
1040 }
1041 return NoChange();
1042}
1043
1044Reduction MachineOperatorReducer::ReduceInt32Sub(Node* node) {
1045 DCHECK_EQ(IrOpcode::kInt32Sub, node->opcode());
1046 Int32BinopMatcher m(node);
1047 if (m.right().Is(0)) return Replace(m.left().node()); // x - 0 => x
1048 if (m.IsFoldable()) { // K - K => K (K stands for arbitrary constants)
1049 return ReplaceInt32(base::SubWithWraparound(m.left().ResolvedValue(),
1050 m.right().ResolvedValue()));
1051 }
1052 if (m.LeftEqualsRight()) return ReplaceInt32(0); // x - x => 0
1053 if (m.right().HasResolvedValue()) { // x - K => x + -K
1054 node->ReplaceInput(
1055 1,
1056 Int32Constant(base::NegateWithWraparound(m.right().ResolvedValue())));
1057 NodeProperties::ChangeOp(node, machine()->Int32Add());
1058 return Changed(node).FollowedBy(ReduceInt32Add(node));
1059 }
1060 return NoChange();
1061}
1062
1063Reduction MachineOperatorReducer::ReduceInt64Sub(Node* node) {
1064 DCHECK_EQ(IrOpcode::kInt64Sub, node->opcode());
1065 Int64BinopMatcher m(node);
1066 if (m.right().Is(0)) return Replace(m.left().node()); // x - 0 => x
1067 if (m.IsFoldable()) { // K - K => K (K stands for arbitrary constants)
1068 return ReplaceInt64(base::SubWithWraparound(m.left().ResolvedValue(),
1069 m.right().ResolvedValue()));
1070 }
1071 if (m.LeftEqualsRight()) return Replace(Int64Constant(0)); // x - x => 0
1072 if (m.right().HasResolvedValue()) { // x - K => x + -K
1073 node->ReplaceInput(
1074 1,
1075 Int64Constant(base::NegateWithWraparound(m.right().ResolvedValue())));
1076 NodeProperties::ChangeOp(node, machine()->Int64Add());
1077 return Changed(node).FollowedBy(ReduceInt64Add(node));
1078 }
1079 return NoChange();
1080}
1081
1082Reduction MachineOperatorReducer::ReduceInt64Mul(Node* node) {
1083 DCHECK_EQ(IrOpcode::kInt64Mul, node->opcode());
1084 Int64BinopMatcher m(node);
1085 if (m.right().Is(0)) return Replace(m.right().node()); // x * 0 => 0
1086 if (m.right().Is(1)) return Replace(m.left().node()); // x * 1 => x
1087 if (m.IsFoldable()) { // K * K => K (K stands for arbitrary constants)
1088 return ReplaceInt64(base::MulWithWraparound(m.left().ResolvedValue(),
1089 m.right().ResolvedValue()));
1090 }
1091 if (m.right().Is(-1)) { // x * -1 => 0 - x
1092 node->ReplaceInput(0, Int64Constant(0));
1093 node->ReplaceInput(1, m.left().node());
1094 NodeProperties::ChangeOp(node, machine()->Int64Sub());
1095 return Changed(node);
1096 }
1097 if (m.right().IsPowerOf2()) { // x * 2^n => x << n
1098 node->ReplaceInput(
1099 1,
1100 Int64Constant(base::bits::WhichPowerOfTwo(m.right().ResolvedValue())));
1101 NodeProperties::ChangeOp(node, machine()->Word64Shl());
1102 return Changed(node).FollowedBy(ReduceWord64Shl(node));
1103 }
1104 // (x * Int64Constant(a)) * Int64Constant(b) => x * Int64Constant(a * b)
1105 if (m.right().HasResolvedValue() && m.left().IsInt64Mul()) {
1106 Int64BinopMatcher n(m.left().node());
1107 if (n.right().HasResolvedValue() && m.OwnsInput(m.left().node())) {
1108 node->ReplaceInput(
1109 1, Int64Constant(base::MulWithWraparound(m.right().ResolvedValue(),
1110 n.right().ResolvedValue())));
1111 node->ReplaceInput(0, n.left().node());
1112 return Changed(node);
1113 }
1114 }
1115 return NoChange();
1116}
1117
1118Reduction MachineOperatorReducer::ReduceInt32Div(Node* node) {
1119 Int32BinopMatcher m(node);
1120 if (m.left().Is(0)) return Replace(m.left().node()); // 0 / x => 0
1121 if (m.right().Is(0)) return Replace(m.right().node()); // x / 0 => 0
1122 if (m.right().Is(1)) return Replace(m.left().node()); // x / 1 => x
1123 if (m.IsFoldable()) { // K / K => K (K stands for arbitrary constants)
1124 return ReplaceInt32(base::bits::SignedDiv32(m.left().ResolvedValue(),
1125 m.right().ResolvedValue()));
1126 }
1127 if (m.LeftEqualsRight()) { // x / x => x != 0
1128 Node* const zero = Int32Constant(0);
1129 return Replace(Word32Equal(Word32Equal(m.left().node(), zero), zero));
1130 }
1131 if (m.right().Is(-1)) { // x / -1 => 0 - x
1132 node->ReplaceInput(0, Int32Constant(0));
1133 node->ReplaceInput(1, m.left().node());
1134 node->TrimInputCount(2);
1135 NodeProperties::ChangeOp(node, machine()->Int32Sub());
1136 return Changed(node);
1137 }
1138 if (m.right().HasResolvedValue()) {
1139 int32_t const divisor = m.right().ResolvedValue();
1140 Node* const dividend = m.left().node();
1141 Node* quotient = dividend;
1142 if (base::bits::IsPowerOfTwo(Abs(divisor))) {
1143 uint32_t const shift = base::bits::WhichPowerOfTwo(Abs(divisor));
1144 DCHECK_NE(0u, shift);
1145 if (shift > 1) {
1146 quotient = Word32Sar(quotient, 31);
1147 }
1148 quotient = Int32Add(Word32Shr(quotient, 32u - shift), dividend);
1149 quotient = Word32Sar(quotient, shift);
1150 } else {
1151 quotient = Int32Div(quotient, Abs(divisor));
1152 }
1153 if (divisor < 0) {
1154 node->ReplaceInput(0, Int32Constant(0));
1155 node->ReplaceInput(1, quotient);
1156 node->TrimInputCount(2);
1157 NodeProperties::ChangeOp(node, machine()->Int32Sub());
1158 return Changed(node);
1159 }
1160 return Replace(quotient);
1161 }
1162 return NoChange();
1163}
1164
1165Reduction MachineOperatorReducer::ReduceUint32Div(Node* node) {
1166 Uint32BinopMatcher m(node);
1167 if (m.left().Is(0)) return Replace(m.left().node()); // 0 / x => 0
1168 if (m.right().Is(0)) return Replace(m.right().node()); // x / 0 => 0
1169 if (m.right().Is(1)) return Replace(m.left().node()); // x / 1 => x
1170 if (m.IsFoldable()) { // K / K => K (K stands for arbitrary constants)
1171 return ReplaceUint32(base::bits::UnsignedDiv32(m.left().ResolvedValue(),
1172 m.right().ResolvedValue()));
1173 }
1174 if (m.LeftEqualsRight()) { // x / x => x != 0
1175 Node* const zero = Int32Constant(0);
1176 return Replace(Word32Equal(Word32Equal(m.left().node(), zero), zero));
1177 }
1178 if (m.right().HasResolvedValue()) {
1179 Node* const dividend = m.left().node();
1180 uint32_t const divisor = m.right().ResolvedValue();
1181 if (base::bits::IsPowerOfTwo(divisor)) { // x / 2^n => x >> n
1182 node->ReplaceInput(1, Uint32Constant(base::bits::WhichPowerOfTwo(
1183 m.right().ResolvedValue())));
1184 node->TrimInputCount(2);
1185 NodeProperties::ChangeOp(node, machine()->Word32Shr());
1186 return Changed(node);
1187 } else {
1188 return Replace(Uint32Div(dividend, divisor));
1189 }
1190 }
1191 return NoChange();
1192}
1193
1194Reduction MachineOperatorReducer::ReduceInt32Mod(Node* node) {
1195 Int32BinopMatcher m(node);
1196 if (m.left().Is(0)) return Replace(m.left().node()); // 0 % x => 0
1197 if (m.right().Is(0)) return Replace(m.right().node()); // x % 0 => 0
1198 if (m.right().Is(1)) return ReplaceInt32(0); // x % 1 => 0
1199 if (m.right().Is(-1)) return ReplaceInt32(0); // x % -1 => 0
1200 if (m.LeftEqualsRight()) return ReplaceInt32(0); // x % x => 0
1201 if (m.IsFoldable()) { // K % K => K (K stands for arbitrary constants)
1202 return ReplaceInt32(base::bits::SignedMod32(m.left().ResolvedValue(),
1203 m.right().ResolvedValue()));
1204 }
1205 if (m.right().HasResolvedValue()) {
1206 Node* const dividend = m.left().node();
1207 uint32_t const divisor = Abs(m.right().ResolvedValue());
1208 if (base::bits::IsPowerOfTwo(divisor)) {
1209 uint32_t const mask = divisor - 1;
1210 Node* const zero = Int32Constant(0);
1211 Diamond d(graph(), common(),
1212 graph()->NewNode(machine()->Int32LessThan(), dividend, zero),
1213 BranchHint::kFalse);
1214 return Replace(
1215 d.Phi(MachineRepresentation::kWord32,
1216 Int32Sub(zero, Word32And(Int32Sub(zero, dividend), mask)),
1217 Word32And(dividend, mask)));
1218 } else {
1219 Node* quotient = Int32Div(dividend, divisor);
1220 DCHECK_EQ(dividend, node->InputAt(0));
1221 node->ReplaceInput(1, Int32Mul(quotient, Int32Constant(divisor)));
1222 node->TrimInputCount(2);
1223 NodeProperties::ChangeOp(node, machine()->Int32Sub());
1224 }
1225 return Changed(node);
1226 }
1227 return NoChange();
1228}
1229
1230Reduction MachineOperatorReducer::ReduceUint32Mod(Node* node) {
1231 Uint32BinopMatcher m(node);
1232 if (m.left().Is(0)) return Replace(m.left().node()); // 0 % x => 0
1233 if (m.right().Is(0)) return Replace(m.right().node()); // x % 0 => 0
1234 if (m.right().Is(1)) return ReplaceUint32(0); // x % 1 => 0
1235 if (m.LeftEqualsRight()) return ReplaceInt32(0); // x % x => 0
1236 if (m.IsFoldable()) { // K % K => K (K stands for arbitrary constants)
1237 return ReplaceUint32(base::bits::UnsignedMod32(m.left().ResolvedValue(),
1238 m.right().ResolvedValue()));
1239 }
1240 if (m.right().HasResolvedValue()) {
1241 Node* const dividend = m.left().node();
1242 uint32_t const divisor = m.right().ResolvedValue();
1243 if (base::bits::IsPowerOfTwo(divisor)) { // x % 2^n => x & 2^n-1
1244 node->ReplaceInput(1, Uint32Constant(m.right().ResolvedValue() - 1));
1245 node->TrimInputCount(2);
1246 NodeProperties::ChangeOp(node, machine()->Word32And());
1247 } else {
1248 Node* quotient = Uint32Div(dividend, divisor);
1249 DCHECK_EQ(dividend, node->InputAt(0));
1250 node->ReplaceInput(1, Int32Mul(quotient, Uint32Constant(divisor)));
1251 node->TrimInputCount(2);
1252 NodeProperties::ChangeOp(node, machine()->Int32Sub());
1253 }
1254 return Changed(node);
1255 }
1256 return NoChange();
1257}
1258
1259Reduction MachineOperatorReducer::ReduceStore(Node* node) {
1260 NodeMatcher nm(node);
1261 DCHECK(nm.IsStore() || nm.IsUnalignedStore());
1262 MachineRepresentation rep =
1263 nm.IsStore() ? StoreRepresentationOf(node->op()).representation()
1264 : UnalignedStoreRepresentationOf(node->op());
1265
1266 const int value_input = 2;
1267 Node* const value = node->InputAt(value_input);
1268
1269 switch (value->opcode()) {
1270 case IrOpcode::kWord32And: {
1271 Uint32BinopMatcher m(value);
1272 if (m.right().HasResolvedValue() &&
1273 ((rep == MachineRepresentation::kWord8 &&
1274 (m.right().ResolvedValue() & 0xFF) == 0xFF) ||
1275 (rep == MachineRepresentation::kWord16 &&
1276 (m.right().ResolvedValue() & 0xFFFF) == 0xFFFF))) {
1277 node->ReplaceInput(value_input, m.left().node());
1278 return Changed(node);
1279 }
1280 break;
1281 }
1282 case IrOpcode::kWord32Sar: {
1283 Int32BinopMatcher m(value);
1284 if (m.left().IsWord32Shl() && ((rep == MachineRepresentation::kWord8 &&
1285 m.right().IsInRange(1, 24)) ||
1286 (rep == MachineRepresentation::kWord16 &&
1287 m.right().IsInRange(1, 16)))) {
1288 Int32BinopMatcher mleft(m.left().node());
1289 if (mleft.right().Is(m.right().ResolvedValue())) {
1290 node->ReplaceInput(value_input, mleft.left().node());
1291 return Changed(node);
1292 }
1293 }
1294 break;
1295 }
1296 default:
1297 break;
1298 }
1299 return NoChange();
1300}
1301
1302Reduction MachineOperatorReducer::ReduceProjection(size_t index, Node* node) {
1303 switch (node->opcode()) {
1304 case IrOpcode::kInt32AddWithOverflow: {
1305 DCHECK(index == 0 || index == 1);
1306 Int32BinopMatcher m(node);
1307 if (m.IsFoldable()) {
1308 int32_t val;
1309 bool ovf = base::bits::SignedAddOverflow32(
1310 m.left().ResolvedValue(), m.right().ResolvedValue(), &val);
1311 return ReplaceInt32(index == 0 ? val : ovf);
1312 }
1313 if (m.right().Is(0)) {
1314 return Replace(index == 0 ? m.left().node() : m.right().node());
1315 }
1316 break;
1317 }
1318 case IrOpcode::kInt32SubWithOverflow: {
1319 DCHECK(index == 0 || index == 1);
1320 Int32BinopMatcher m(node);
1321 if (m.IsFoldable()) {
1322 int32_t val;
1323 bool ovf = base::bits::SignedSubOverflow32(
1324 m.left().ResolvedValue(), m.right().ResolvedValue(), &val);
1325 return ReplaceInt32(index == 0 ? val : ovf);
1326 }
1327 if (m.right().Is(0)) {
1328 return Replace(index == 0 ? m.left().node() : m.right().node());
1329 }
1330 break;
1331 }
1332 case IrOpcode::kInt32MulWithOverflow: {
1333 DCHECK(index == 0 || index == 1);
1334 Int32BinopMatcher m(node);
1335 if (m.IsFoldable()) {
1336 int32_t val;
1337 bool ovf = base::bits::SignedMulOverflow32(
1338 m.left().ResolvedValue(), m.right().ResolvedValue(), &val);
1339 return ReplaceInt32(index == 0 ? val : ovf);
1340 }
1341 if (m.right().Is(0)) {
1342 return Replace(m.right().node());
1343 }
1344 if (m.right().Is(1)) {
1345 return index == 0 ? Replace(m.left().node()) : ReplaceInt32(0);
1346 }
1347 break;
1348 }
1349 default:
1350 break;
1351 }
1352 return NoChange();
1353}
1354
1355namespace {
1356
1357// Returns true if "value << shift >> shift == value". This can be interpreted
1358// as "left shifting |value| by |shift| doesn't shift away significant bits".
1359// Or, equivalently, "left shifting |value| by |shift| doesn't have signed
1360// overflow".
1361bool CanRevertLeftShiftWithRightShift(int32_t value, int32_t shift) {
1362 if (shift < 0 || shift >= 32) {
1363 // This shift would be UB in C++
1364 return false;
1365 }
1366 if (static_cast<int32_t>(static_cast<uint32_t>(value) << shift) >> shift !=
1367 static_cast<int32_t>(value)) {
1368 return false;
1369 }
1370 return true;
1371}
1372
1373} // namespace
1374
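A small self-contained check of the predicate above with concrete values; the local copy exists only so the example compiles on its own. The predicate guards the "(x >> n) <= k => x <= (k << n)" rewrites in ReduceWord32Comparisons below:

#include <cassert>
#include <cstdint>

// Local restatement of CanRevertLeftShiftWithRightShift for the examples.
bool CanRevert(int32_t value, int32_t shift) {
  if (shift < 0 || shift >= 32) return false;  // such a shift would be UB in C++
  return (static_cast<int32_t>(static_cast<uint32_t>(value) << shift) >> shift) ==
         value;
}

int main() {
  assert(CanRevert(5, 2));    // 5 << 2 == 20 and 20 >> 2 == 5: revertible
  assert(!CanRevert(5, 29));  // 5 << 29 reaches the sign bit: not revertible
  assert(!CanRevert(5, 32));  // out-of-range shift counts are rejected up front
  return 0;
}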
1375Reduction MachineOperatorReducer::ReduceWord32Comparisons(Node* node) {
1376 DCHECK(node->opcode() == IrOpcode::kInt32LessThan ||
1377 node->opcode() == IrOpcode::kInt32LessThanOrEqual ||
1378 node->opcode() == IrOpcode::kUint32LessThan ||
1379 node->opcode() == IrOpcode::kUint32LessThanOrEqual);
1380 Int32BinopMatcher m(node);
1381 // (x >> K) < (y >> K) => x < y if only zeros shifted out
1382 if (m.left().op() == machine()->Word32SarShiftOutZeros() &&
1383 m.right().op() == machine()->Word32SarShiftOutZeros()) {
1384 Int32BinopMatcher mleft(m.left().node());
1385 Int32BinopMatcher mright(m.right().node());
1386 if (mleft.right().HasResolvedValue() &&
1387 mright.right().Is(mleft.right().ResolvedValue())) {
1388 node->ReplaceInput(0, mleft.left().node());
1389 node->ReplaceInput(1, mright.left().node());
1390 return Changed(node);
1391 }
1392 }
1393 // Simplifying (x >> n) <= k into x <= (k << n), with "k << n" being
1394 // computed here at compile time.
1395 if (m.right().HasResolvedValue() &&
1396 m.left().op() == machine()->Word32SarShiftOutZeros() &&
1397 m.left().node()->UseCount() == 1) {
1398 uint32_t right = m.right().ResolvedValue();
1399 Int32BinopMatcher mleft(m.left().node());
1400 if (mleft.right().HasResolvedValue()) {
1401 auto shift = mleft.right().ResolvedValue();
1402 if (CanRevertLeftShiftWithRightShift(right, shift)) {
1403 node->ReplaceInput(0, mleft.left().node());
1404 node->ReplaceInput(1, Int32Constant(right << shift));
1405 return Changed(node);
1406 }
1407 }
1408 }
1409 // Simplifying k <= (x >> n) into (k << n) <= x, with "k << n" being
1410 // computed here at compile time.
1411 if (m.left().HasResolvedValue() &&
1412 m.right().op() == machine()->Word32SarShiftOutZeros() &&
1413 m.right().node()->UseCount() == 1) {
1414 uint32_t left = m.left().ResolvedValue();
1415 Int32BinopMatcher mright(m.right().node());
1416 if (mright.right().HasResolvedValue()) {
1417 auto shift = mright.right().ResolvedValue();
1418 if (CanRevertLeftShiftWithRightShift(left, shift)) {
1419 node->ReplaceInput(0, Int32Constant(left << shift));
1420 node->ReplaceInput(1, mright.left().node());
1421 return Changed(node);
1422 }
1423 }
1424 }
1425 return NoChange();
1426}
1427
1428const Operator* MachineOperatorReducer::Map64To32Comparison(
1429 const Operator* op, bool sign_extended) {
1430 switch (op->opcode()) {
1431 case IrOpcode::kInt64LessThan:
1432 return sign_extended ? machine()->Int32LessThan()
1433 : machine()->Uint32LessThan();
1434 case IrOpcode::kInt64LessThanOrEqual:
1435 return sign_extended ? machine()->Int32LessThanOrEqual()
1436 : machine()->Uint32LessThanOrEqual();
1437 case IrOpcode::kUint64LessThan:
1438 return machine()->Uint32LessThan();
1439 case IrOpcode::kUint64LessThanOrEqual:
1440 return machine()->Uint32LessThanOrEqual();
1441 default:
1442 UNREACHABLE()V8_Fatal("unreachable code");
1443 }
1444}
1445
1446Reduction MachineOperatorReducer::ReduceWord64Comparisons(Node* node) {
1447 DCHECK(node->opcode() == IrOpcode::kInt64LessThan ||((void) 0)
1448 node->opcode() == IrOpcode::kInt64LessThanOrEqual ||((void) 0)
1449 node->opcode() == IrOpcode::kUint64LessThan ||((void) 0)
1450 node->opcode() == IrOpcode::kUint64LessThanOrEqual)((void) 0);
1451 Int64BinopMatcher m(node);
1452
1453 bool sign_extended =
1454 m.left().IsChangeInt32ToInt64() && m.right().IsChangeInt32ToInt64();
1455 if (sign_extended || (m.left().IsChangeUint32ToUint64() &&
1456 m.right().IsChangeUint32ToUint64())) {
1457 node->ReplaceInput(0, NodeProperties::GetValueInput(m.left().node(), 0));
1458 node->ReplaceInput(1, NodeProperties::GetValueInput(m.right().node(), 0));
1459 NodeProperties::ChangeOp(node,
1460 Map64To32Comparison(node->op(), sign_extended));
1461 return Changed(node).FollowedBy(Reduce(node));
1462 }
1463
1464 // (x >> K) < (y >> K) => x < y if only zeros shifted out
1465 // This is useful for Smi untagging, which results in such a shift.
1466 if (m.left().op() == machine()->Word64SarShiftOutZeros() &&
1467 m.right().op() == machine()->Word64SarShiftOutZeros()) {
1468 Int64BinopMatcher mleft(m.left().node());
1469 Int64BinopMatcher mright(m.right().node());
1470 if (mleft.right().HasResolvedValue() &&
1471 mright.right().Is(mleft.right().ResolvedValue())) {
1472 node->ReplaceInput(0, mleft.left().node());
1473 node->ReplaceInput(1, mright.left().node());
1474 return Changed(node);
1475 }
1476 }
1477
1478 return NoChange();
1479}
1480
1481Reduction MachineOperatorReducer::ReduceWord32Shifts(Node* node) {
1482 DCHECK((node->opcode() == IrOpcode::kWord32Shl) ||((void) 0)
1483 (node->opcode() == IrOpcode::kWord32Shr) ||((void) 0)
1484 (node->opcode() == IrOpcode::kWord32Sar))((void) 0);
1485 if (machine()->Word32ShiftIsSafe()) {
1486 // Remove the explicit 'and' with 0x1F if the shift provided by the machine
1487 // instruction matches that required by JavaScript.
1488 Int32BinopMatcher m(node);
1489 if (m.right().IsWord32And()) {
1490 Int32BinopMatcher mright(m.right().node());
1491 if (mright.right().Is(0x1F)) {
1492 node->ReplaceInput(1, mright.left().node());
1493 return Changed(node);
1494 }
1495 }
1496 }
1497 return NoChange();
1498}
1499
1500Reduction MachineOperatorReducer::ReduceWord32Shl(Node* node) {
1501 DCHECK_EQ(IrOpcode::kWord32Shl, node->opcode())((void) 0);
1502 Int32BinopMatcher m(node);
1503 if (m.right().Is(0)) return Replace(m.left().node()); // x << 0 => x
1504 if (m.IsFoldable()) { // K << K => K (K stands for arbitrary constants)
1505 return ReplaceInt32(base::ShlWithWraparound(m.left().ResolvedValue(),
1506 m.right().ResolvedValue()));
1507 }
1508 if (m.right().IsInRange(1, 31)) {
1509 if (m.left().IsWord32Sar() || m.left().IsWord32Shr()) {
1510 Int32BinopMatcher mleft(m.left().node());
1511
1512 // If x >> K only shifted out zeros:
1513 // (x >> K) << L => x if K == L
1514 // (x >> K) << L => x >> (K-L) if K > L
1515 // (x >> K) << L => x << (L-K) if K < L
1516 // Since this is used for Smi untagging, we currently only need it for
1517 // signed shifts.
1518 if (mleft.op() == machine()->Word32SarShiftOutZeros() &&
1519 mleft.right().IsInRange(1, 31)) {
1520 Node* x = mleft.left().node();
1521 int k = mleft.right().ResolvedValue();
1522 int l = m.right().ResolvedValue();
1523 if (k == l) {
1524 return Replace(x);
1525 } else if (k > l) {
1526 node->ReplaceInput(0, x);
1527 node->ReplaceInput(1, Uint32Constant(k - l));
1528 NodeProperties::ChangeOp(node, machine()->Word32Sar());
1529 return Changed(node).FollowedBy(ReduceWord32Sar(node));
1530 } else {
1531 DCHECK(k < l)((void) 0);
1532 node->ReplaceInput(0, x);
1533 node->ReplaceInput(1, Uint32Constant(l - k));
1534 return Changed(node);
1535 }
1536 }
1537
1538 // (x >>> K) << K => x & ~(2^K - 1)
1539 // (x >> K) << K => x & ~(2^K - 1)
1540 if (mleft.right().Is(m.right().ResolvedValue())) {
1541 node->ReplaceInput(0, mleft.left().node());
1542 node->ReplaceInput(1,
1543 Uint32Constant(std::numeric_limits<uint32_t>::max()
1544 << m.right().ResolvedValue()));
1545 NodeProperties::ChangeOp(node, machine()->Word32And());
1546 return Changed(node).FollowedBy(ReduceWord32And(node));
1547 }
1548 }
1549 }
1550 return ReduceWord32Shifts(node);
1551}
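The (x >>> K) << K => x & ~(2^K - 1) rewrite used above, and its arithmetic-shift twin, can be spot-checked over a sample of values; a small standalone sketch with a hypothetical K (it assumes arithmetic right shift for negative values, matching V8's targets):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t K = 5;  // hypothetical shift amount in [1, 31]
  const uint32_t low_bits_cleared = ~((1u << K) - 1u);  // ~(2^K - 1)
  for (uint32_t i = 0; i <= 0xFFFF; ++i) {
    uint32_t x = i * 0x9E3779B9u;  // spread the test values over the 32-bit range
    // (x >>> K) << K => x & ~(2^K - 1)
    assert(((x >> K) << K) == (x & low_bits_cleared));
    // (x >> K) << K => x & ~(2^K - 1); the left shift is done on the unsigned
    // view, as the reducer does, to avoid shifting a negative value left.
    int32_t s = static_cast<int32_t>(x);
    assert((static_cast<uint32_t>(s >> K) << K) == (x & low_bits_cleared));
  }
  return 0;
}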
1552
1553Reduction MachineOperatorReducer::ReduceWord64Shl(Node* node) {
1554 DCHECK_EQ(IrOpcode::kWord64Shl, node->opcode())((void) 0);
1555 Int64BinopMatcher m(node);
1556 if (m.right().Is(0)) return Replace(m.left().node()); // x << 0 => x
1557 if (m.IsFoldable()) { // K << K => K (K stands for arbitrary constants)
1558 return ReplaceInt64(base::ShlWithWraparound(m.left().ResolvedValue(),
1559 m.right().ResolvedValue()));
1560 }
1561 if (m.right().IsInRange(1, 63) &&
1562 (m.left().IsWord64Sar() || m.left().IsWord64Shr())) {
1563 Int64BinopMatcher mleft(m.left().node());
1564
1565 // If x >> K only shifted out zeros:
1566 // (x >> K) << L => x if K == L
1567 // (x >> K) << L => x >> (K-L) if K > L
1568 // (x >> K) << L => x << (L-K) if K < L
1569 // Since this is used for Smi untagging, we currently only need it for
1570 // signed shifts.
1571 if (mleft.op() == machine()->Word64SarShiftOutZeros() &&
1572 mleft.right().IsInRange(1, 63)) {
1573 Node* x = mleft.left().node();
1574 int64_t k = mleft.right().ResolvedValue();
1575 int64_t l = m.right().ResolvedValue();
1576 if (k == l) {
1577 return Replace(x);
1578 } else if (k > l) {
1579 node->ReplaceInput(0, x);
1580 node->ReplaceInput(1, Uint64Constant(k - l));
1581 NodeProperties::ChangeOp(node, machine()->Word64Sar());
1582 return Changed(node).FollowedBy(ReduceWord64Sar(node));
1583 } else {
1584 DCHECK(k < l)((void) 0);
1585 node->ReplaceInput(0, x);
1586 node->ReplaceInput(1, Uint64Constant(l - k));
1587 return Changed(node);
1588 }
1589 }
1590
1591 // (x >>> K) << K => x & ~(2^K - 1)
1592 // (x >> K) << K => x & ~(2^K - 1)
1593 if (mleft.right().Is(m.right().ResolvedValue())) {
1594 node->ReplaceInput(0, mleft.left().node());
1595 node->ReplaceInput(1, Uint64Constant(std::numeric_limits<uint64_t>::max()
1596 << m.right().ResolvedValue()));
1597 NodeProperties::ChangeOp(node, machine()->Word64And());
1598 return Changed(node).FollowedBy(ReduceWord64And(node));
1599 }
1600 }
1601 return NoChange();
1602}
1603
1604Reduction MachineOperatorReducer::ReduceWord32Shr(Node* node) {
1605 Uint32BinopMatcher m(node);
1606 if (m.right().Is(0)) return Replace(m.left().node()); // x >>> 0 => x
1607 if (m.IsFoldable()) { // K >>> K => K (K stands for arbitrary constants)
1608 return ReplaceInt32(m.left().ResolvedValue() >>
1609 (m.right().ResolvedValue() & 31));
1610 }
1611 if (m.left().IsWord32And() && m.right().HasResolvedValue()) {
1612 Uint32BinopMatcher mleft(m.left().node());
1613 if (mleft.right().HasResolvedValue()) {
1614 uint32_t shift = m.right().ResolvedValue() & 31;
1615 uint32_t mask = mleft.right().ResolvedValue();
1616 if ((mask >> shift) == 0) {
1617 // (m >>> s) == 0 implies ((x & m) >>> s) == 0
1618 return ReplaceInt32(0);
1619 }
1620 }
1621 }
1622 return ReduceWord32Shifts(node);
1623}
1624
1625Reduction MachineOperatorReducer::ReduceWord64Shr(Node* node) {
1626 DCHECK_EQ(IrOpcode::kWord64Shr, node->opcode())((void) 0);
1627 Uint64BinopMatcher m(node);
1628 if (m.right().Is(0)) return Replace(m.left().node()); // x >>> 0 => x
1629 if (m.IsFoldable()) { // K >>> K => K (K stands for arbitrary constants)
1630 return ReplaceInt64(m.left().ResolvedValue() >>
1631 (m.right().ResolvedValue() & 63));
1632 }
1633 return NoChange();
1634}
1635
1636Reduction MachineOperatorReducer::ReduceWord32Sar(Node* node) {
1637 Int32BinopMatcher m(node);
1638 if (m.right().Is(0)) return Replace(m.left().node()); // x >> 0 => x
1639 if (m.IsFoldable()) { // K >> K => K (K stands for arbitrary constants)
1640 return ReplaceInt32(m.left().ResolvedValue() >>
1641 (m.right().ResolvedValue() & 31));
1642 }
1643 if (m.left().IsWord32Shl()) {
1644 Int32BinopMatcher mleft(m.left().node());
1645 if (mleft.left().IsComparison()) {
1646 if (m.right().Is(31) && mleft.right().Is(31)) {
1647 // Comparison << 31 >> 31 => 0 - Comparison
1648 node->ReplaceInput(0, Int32Constant(0));
1649 node->ReplaceInput(1, mleft.left().node());
1650 NodeProperties::ChangeOp(node, machine()->Int32Sub());
1651 return Changed(node).FollowedBy(ReduceInt32Sub(node));
1652 }
1653 } else if (mleft.left().IsLoad()) {
1654 LoadRepresentation const rep =
1655 LoadRepresentationOf(mleft.left().node()->op());
1656 if (m.right().Is(24) && mleft.right().Is(24) &&
1657 rep == MachineType::Int8()) {
1658 // Load[kMachInt8] << 24 >> 24 => Load[kMachInt8]
1659 return Replace(mleft.left().node());
1660 }
1661 if (m.right().Is(16) && mleft.right().Is(16) &&
1662 rep == MachineType::Int16()) {
1663 // Load[kMachInt16] << 16 >> 16 => Load[kMachInt16]
1664 return Replace(mleft.left().node());
1665 }
1666 }
1667 }
1668 return ReduceWord32Shifts(node);
1669}
1670
1671Reduction MachineOperatorReducer::ReduceWord64Sar(Node* node) {
1672 Int64BinopMatcher m(node);
1673 if (m.right().Is(0)) return Replace(m.left().node()); // x >> 0 => x
1674 if (m.IsFoldable()) {
1675 return ReplaceInt64(m.left().ResolvedValue() >>
1676 (m.right().ResolvedValue() & 63));
1677 }
1678 return NoChange();
1679}
1680
1681template <typename WordNAdapter>
1682Reduction MachineOperatorReducer::ReduceWordNAnd(Node* node) {
1683 using A = WordNAdapter;
1684 A a(this);
1685
1686 typename A::IntNBinopMatcher m(node);
1687 if (m.right().Is(0)) return Replace(m.right().node()); // x & 0 => 0
1688 if (m.right().Is(-1)) return Replace(m.left().node()); // x & -1 => x
1689 if (m.left().IsComparison() && m.right().Is(1)) { // CMP & 1 => CMP
1690 return Replace(m.left().node());
1691 }
1692 if (m.IsFoldable()) { // K & K => K (K stands for arbitrary constants)
1693 return a.ReplaceIntN(m.left().ResolvedValue() & m.right().ResolvedValue());
1694 }
1695 if (m.LeftEqualsRight()) return Replace(m.left().node()); // x & x => x
1696 if (A::IsWordNAnd(m.left()) && m.right().HasResolvedValue()) {
1697 typename A::IntNBinopMatcher mleft(m.left().node());
1698 if (mleft.right().HasResolvedValue()) { // (x & K) & K => x & K
1699 node->ReplaceInput(0, mleft.left().node());
1700 node->ReplaceInput(1, a.IntNConstant(m.right().ResolvedValue() &
1701 mleft.right().ResolvedValue()));
1702 return Changed(node).FollowedBy(a.ReduceWordNAnd(node));
1703 }
1704 }
1705 if (m.right().IsNegativePowerOf2()) {
1706 typename A::intN_t const mask = m.right().ResolvedValue();
1707 typename A::intN_t const neg_mask = base::NegateWithWraparound(mask);
1708 if (A::IsWordNShl(m.left())) {
1709 typename A::UintNBinopMatcher mleft(m.left().node());
1710 if (mleft.right().HasResolvedValue() &&
1711 (mleft.right().ResolvedValue() & (A::WORD_SIZE - 1)) >=
1712 base::bits::CountTrailingZeros(mask)) {
1713 // (x << L) & (-1 << K) => x << L iff L >= K
1714 return Replace(mleft.node());
1715 }
1716 } else if (A::IsIntNAdd(m.left())) {
1717 typename A::IntNBinopMatcher mleft(m.left().node());
1718 if (mleft.right().HasResolvedValue() &&
1719 (mleft.right().ResolvedValue() & mask) ==
1720 mleft.right().ResolvedValue()) {
1721 // (x + (K << L)) & (-1 << L) => (x & (-1 << L)) + (K << L)
1722 node->ReplaceInput(0,
1723 a.WordNAnd(mleft.left().node(), m.right().node()));
1724 node->ReplaceInput(1, mleft.right().node());
1725 NodeProperties::ChangeOp(node, a.IntNAdd(machine()));
1726 return Changed(node).FollowedBy(a.ReduceIntNAdd(node));
1727 }
1728 if (A::IsIntNMul(mleft.left())) {
1729 typename A::IntNBinopMatcher mleftleft(mleft.left().node());
1730 if (mleftleft.right().IsMultipleOf(neg_mask)) {
1731 // (y * (K << L) + x) & (-1 << L) => (x & (-1 << L)) + y * (K << L)
1732 node->ReplaceInput(
1733 0, a.WordNAnd(mleft.right().node(), m.right().node()));
1734 node->ReplaceInput(1, mleftleft.node());
1735 NodeProperties::ChangeOp(node, a.IntNAdd(machine()));
1736 return Changed(node).FollowedBy(a.ReduceIntNAdd(node));
1737 }
1738 }
1739 if (A::IsIntNMul(mleft.right())) {
1740 typename A::IntNBinopMatcher mleftright(mleft.right().node());
1741 if (mleftright.right().IsMultipleOf(neg_mask)) {
1742 // (x + y * (K << L)) & (-1 << L) => (x & (-1 << L)) + y * (K << L)
1743 node->ReplaceInput(0,
1744 a.WordNAnd(mleft.left().node(), m.right().node()));
1745 node->ReplaceInput(1, mleftright.node());
1746 NodeProperties::ChangeOp(node, a.IntNAdd(machine()));
1747 return Changed(node).FollowedBy(a.ReduceIntNAdd(node));
1748 }
1749 }
1750 if (A::IsWordNShl(mleft.left())) {
1751 typename A::IntNBinopMatcher mleftleft(mleft.left().node());
1752 if (mleftleft.right().Is(base::bits::CountTrailingZeros(mask))) {
1753 // (y << L + x) & (-1 << L) => (x & (-1 << L)) + y << L
1754 node->ReplaceInput(
1755 0, a.WordNAnd(mleft.right().node(), m.right().node()));
1756 node->ReplaceInput(1, mleftleft.node());
1757 NodeProperties::ChangeOp(node, a.IntNAdd(machine()));
1758 return Changed(node).FollowedBy(a.ReduceIntNAdd(node));
1759 }
1760 }
1761 if (A::IsWordNShl(mleft.right())) {
1762 typename A::IntNBinopMatcher mleftright(mleft.right().node());
1763 if (mleftright.right().Is(base::bits::CountTrailingZeros(mask))) {
1764 // (x + y << L) & (-1 << L) => (x & (-1 << L)) + y << L
1765 node->ReplaceInput(0,
1766 a.WordNAnd(mleft.left().node(), m.right().node()));
1767 node->ReplaceInput(1, mleftright.node());
1768 NodeProperties::ChangeOp(node, a.IntNAdd(machine()));
1769 return Changed(node).FollowedBy(a.ReduceIntNAdd(node));
1770 }
1771 }
1772 } else if (A::IsIntNMul(m.left())) {
1773 typename A::IntNBinopMatcher mleft(m.left().node());
1774 if (mleft.right().IsMultipleOf(neg_mask)) {
1775 // (x * (K << L)) & (-1 << L) => x * (K << L)
1776 return Replace(mleft.node());
1777 }
1778 }
1779 }
1780 return NoChange();
1781}
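The alignment-preserving rewrite (x + (K << L)) & (-1 << L) => (x & (-1 << L)) + (K << L) holds because adding a multiple of 2^L never changes the low L bits. A quick numeric check under wraparound arithmetic (editor's sketch, hypothetical constants):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t L = 4;                     // hypothetical alignment shift
  const uint32_t mask = ~((1u << L) - 1u);  // the "-1 << L" mask from the comment
  const uint32_t k_shifted = 7u << L;       // a constant of the form K << L
  for (uint32_t i = 0; i <= 0xFFFF; ++i) {
    uint32_t x = i * 2654435761u;
    // (x + (K << L)) & (-1 << L) => (x & (-1 << L)) + (K << L)
    assert(((x + k_shifted) & mask) == ((x & mask) + k_shifted));
  }
  return 0;
}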
1782
1783namespace {
1784
1785// Represents an operation of the form `(source & mask) == masked_value`.
1786// where each bit set in masked_value also has to be set in mask.
1787struct BitfieldCheck {
1788 Node* const source;
1789 uint32_t const mask;
1790 uint32_t const masked_value;
1791 bool const truncate_from_64_bit;
1792
1793 BitfieldCheck(Node* source, uint32_t mask, uint32_t masked_value,
1794 bool truncate_from_64_bit)
1795 : source(source),
1796 mask(mask),
1797 masked_value(masked_value),
1798 truncate_from_64_bit(truncate_from_64_bit) {
1799 CHECK_EQ(masked_value & ~mask, 0);
1801
1802 static base::Optional<BitfieldCheck> Detect(Node* node) {
1803 // There are two patterns to check for here:
1804 // 1. Single-bit checks: `(val >> shift) & 1`, where:
1805 // - the shift may be omitted, and/or
1806 // - the result may be truncated from 64 to 32
1807 // 2. Equality checks: `(val & mask) == expected`, where:
1808 // - val may be truncated from 64 to 32 before masking (see
1809 // ReduceWord32EqualForConstantRhs)
1810 if (node->opcode() == IrOpcode::kWord32Equal) {
1811 Uint32BinopMatcher eq(node);
1812 if (eq.left().IsWord32And()) {
1813 Uint32BinopMatcher mand(eq.left().node());
1814 if (mand.right().HasResolvedValue() && eq.right().HasResolvedValue()) {
1815 uint32_t mask = mand.right().ResolvedValue();
1816 uint32_t masked_value = eq.right().ResolvedValue();
1817 if ((masked_value & ~mask) != 0) return {};
1818 if (mand.left().IsTruncateInt64ToInt32()) {
1819 return BitfieldCheck(
1820 NodeProperties::GetValueInput(mand.left().node(), 0), mask,
1821 masked_value, true);
1822 } else {
1823 return BitfieldCheck(mand.left().node(), mask, masked_value, false);
1824 }
1825 }
1826 }
1827 } else {
1828 if (node->opcode() == IrOpcode::kTruncateInt64ToInt32) {
1829 return TryDetectShiftAndMaskOneBit<Word64Adapter>(
1830 NodeProperties::GetValueInput(node, 0));
1831 } else {
1832 return TryDetectShiftAndMaskOneBit<Word32Adapter>(node);
1833 }
1834 }
1835 return {};
1836 }
1837
1838 base::Optional<BitfieldCheck> TryCombine(const BitfieldCheck& other) {
1839 if (source != other.source ||
1840 truncate_from_64_bit != other.truncate_from_64_bit)
1841 return {};
1842 uint32_t overlapping_bits = mask & other.mask;
1843 // It would be kind of strange to have any overlapping bits, but they can be
1844 // allowed as long as they don't require opposite values in the same
1845 // positions.
1846 if ((masked_value & overlapping_bits) !=
1847 (other.masked_value & overlapping_bits))
1848 return {};
1849 return BitfieldCheck{source, mask | other.mask,
1850 masked_value | other.masked_value,
1851 truncate_from_64_bit};
1852 }
1853
1854 private:
1855 template <typename WordNAdapter>
1856 static base::Optional<BitfieldCheck> TryDetectShiftAndMaskOneBit(Node* node) {
1857 // Look for the pattern `(val >> shift) & 1`. The shift may be omitted.
1858 if (WordNAdapter::IsWordNAnd(NodeMatcher(node))) {
1859 typename WordNAdapter::IntNBinopMatcher mand(node);
1860 if (mand.right().HasResolvedValue() &&
1861 mand.right().ResolvedValue() == 1) {
1862 if (WordNAdapter::IsWordNShr(mand.left()) ||
1863 WordNAdapter::IsWordNSar(mand.left())) {
1864 typename WordNAdapter::UintNBinopMatcher shift(mand.left().node());
1865 if (shift.right().HasResolvedValue() &&
1866 shift.right().ResolvedValue() < 32u) {
1867 uint32_t mask = 1 << shift.right().ResolvedValue();
1868 return BitfieldCheck{shift.left().node(), mask, mask,
1869 WordNAdapter::WORD_SIZE == 64};
1870 }
1871 }
1872 return BitfieldCheck{mand.left().node(), 1, 1,
1873 WordNAdapter::WORD_SIZE == 64};
1874 }
1875 }
1876 return {};
1877 }
1878};
1879
1880} // namespace
1881
1882Reduction MachineOperatorReducer::ReduceWord32And(Node* node) {
1883 DCHECK_EQ(IrOpcode::kWord32And, node->opcode())((void) 0);
1884 Reduction reduction = ReduceWordNAnd<Word32Adapter>(node);
1885 if (reduction.Changed()) {
1886 return reduction;
1887 }
1888
1889 // Attempt to detect multiple bitfield checks from the same bitfield struct
1890 // and fold them into a single check.
1891 Int32BinopMatcher m(node);
1892 if (auto right_bitfield = BitfieldCheck::Detect(m.right().node())) {
1893 if (auto left_bitfield = BitfieldCheck::Detect(m.left().node())) {
1894 if (auto combined_bitfield = left_bitfield->TryCombine(*right_bitfield)) {
1895 Node* source = combined_bitfield->source;
1896 if (combined_bitfield->truncate_from_64_bit) {
1897 source = TruncateInt64ToInt32(source);
1898 }
1899 node->ReplaceInput(0, Word32And(source, combined_bitfield->mask));
1900 node->ReplaceInput(1, Int32Constant(combined_bitfield->masked_value));
1901 NodeProperties::ChangeOp(node, machine()->Word32Equal());
1902 return Changed(node).FollowedBy(ReduceWord32Equal(node));
1903 }
1904 }
1905 }
1906
1907 return NoChange();
1908}
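The bitfield-combining step folds two checks of the form (x & m) == v into one, provided the checks do not demand opposite values in overlapping mask positions. A standalone sketch with two hypothetical disjoint bitfields (not V8 code):

#include <cassert>
#include <cstdint>

int main() {
  // Two hypothetical checks against the same source word; each expected value
  // only has bits inside its own mask, and the masks do not conflict.
  const uint32_t m1 = 0x0000000Fu, v1 = 0x00000005u;  // low nibble == 5
  const uint32_t m2 = 0x00000300u, v2 = 0x00000100u;  // bits 8..9 == 01
  for (uint32_t x = 0; x <= 0xFFFFu; ++x) {
    bool separate = ((x & m1) == v1) && ((x & m2) == v2);
    bool combined = (x & (m1 | m2)) == (v1 | v2);
    assert(separate == combined);
  }
  return 0;
}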
1909
1910Reduction MachineOperatorReducer::ReduceWord64And(Node* node) {
1911 DCHECK_EQ(IrOpcode::kWord64And, node->opcode())((void) 0);
1912 return ReduceWordNAnd<Word64Adapter>(node);
1913}
1914
1915Reduction MachineOperatorReducer::TryMatchWord32Ror(Node* node) {
1916 // Recognize rotation, we are matching and transforming as follows:
1917 // x << y | x >>> (32 - y) => x ror (32 - y)
1918 // x << (32 - y) | x >>> y => x ror y
1919 // x << y ^ x >>> (32 - y) => x ror (32 - y) if y & 31 != 0
1920 // x << (32 - y) ^ x >>> y => x ror y if y & 31 != 0
1921 // (As well as the commuted forms.)
1922 // Note the side condition for XOR: the optimization doesn't hold for
1923 // multiples of 32.
1924
1925 DCHECK(IrOpcode::kWord32Or == node->opcode() ||((void) 0)
1926 IrOpcode::kWord32Xor == node->opcode())((void) 0);
1927 Int32BinopMatcher m(node);
1928 Node* shl = nullptr;
1929 Node* shr = nullptr;
1930 if (m.left().IsWord32Shl() && m.right().IsWord32Shr()) {
1931 shl = m.left().node();
1932 shr = m.right().node();
1933 } else if (m.left().IsWord32Shr() && m.right().IsWord32Shl()) {
1934 shl = m.right().node();
1935 shr = m.left().node();
1936 } else {
1937 return NoChange();
1938 }
1939
1940 Int32BinopMatcher mshl(shl);
1941 Int32BinopMatcher mshr(shr);
1942 if (mshl.left().node() != mshr.left().node()) return NoChange();
1943
1944 if (mshl.right().HasResolvedValue() && mshr.right().HasResolvedValue()) {
1945 // Case where y is a constant.
1946 if (mshl.right().ResolvedValue() + mshr.right().ResolvedValue() != 32) {
1947 return NoChange();
1948 }
1949 if (node->opcode() == IrOpcode::kWord32Xor &&
1950 (mshl.right().ResolvedValue() & 31) == 0) {
1951 return NoChange();
1952 }
1953 } else {
1954 Node* sub = nullptr;
1955 Node* y = nullptr;
1956 if (mshl.right().IsInt32Sub()) {
1957 sub = mshl.right().node();
1958 y = mshr.right().node();
1959 } else if (mshr.right().IsInt32Sub()) {
1960 sub = mshr.right().node();
1961 y = mshl.right().node();
1962 } else {
1963 return NoChange();
1964 }
1965
1966 Int32BinopMatcher msub(sub);
1967 if (!msub.left().Is(32) || msub.right().node() != y) return NoChange();
1968 if (node->opcode() == IrOpcode::kWord32Xor) {
1969 return NoChange(); // Can't guarantee y & 31 != 0.
1970 }
1971 }
1972
1973 node->ReplaceInput(0, mshl.left().node());
1974 node->ReplaceInput(1, mshr.right().node());
1975 NodeProperties::ChangeOp(node, machine()->Word32Ror());
1976 return Changed(node);
1977}
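The rotation identity being matched, x << y | x >>> (32 - y) == x ror (32 - y), can be verified directly for every shift amount in [1, 31] (the endpoints are excluded because a C++ shift by 32 is undefined); an illustrative sketch:

#include <cassert>
#include <cstdint>

static uint32_t Ror32(uint32_t x, uint32_t n) {  // n must stay in [1, 31] here
  return (x >> n) | (x << (32 - n));
}

int main() {
  for (uint32_t y = 1; y <= 31; ++y) {  // y == 0 or 32 would make a C++ shift UB
    for (uint32_t i = 0; i <= 0xFFF; ++i) {
      uint32_t x = i * 0x9E3779B9u;
      // x << y | x >>> (32 - y) => x ror (32 - y)
      assert(((x << y) | (x >> (32 - y))) == Ror32(x, 32 - y));
    }
  }
  return 0;
}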
1978
1979template <typename WordNAdapter>
1980Reduction MachineOperatorReducer::ReduceWordNOr(Node* node) {
1981 using A = WordNAdapter;
1982 A a(this);
1983
1984 typename A::IntNBinopMatcher m(node);
1985 if (m.right().Is(0)) return Replace(m.left().node()); // x | 0 => x
1986 if (m.right().Is(-1)) return Replace(m.right().node()); // x | -1 => -1
1987 if (m.IsFoldable()) { // K | K => K (K stands for arbitrary constants)
1988 return a.ReplaceIntN(m.left().ResolvedValue() | m.right().ResolvedValue());
1989 }
1990 if (m.LeftEqualsRight()) return Replace(m.left().node()); // x | x => x
1991
1992 // (x & K1) | K2 => x | K2 if K2 has ones for every zero bit in K1.
1993 // This case can be constructed by UpdateWord and UpdateWord32 in CSA.
1994 if (m.right().HasResolvedValue()) {
1995 if (A::IsWordNAnd(m.left())) {
1996 typename A::IntNBinopMatcher mand(m.left().node());
1997 if (mand.right().HasResolvedValue()) {
1998 if ((m.right().ResolvedValue() | mand.right().ResolvedValue()) == -1) {
1999 node->ReplaceInput(0, mand.left().node());
2000 return Changed(node);
2001 }
2002 }
2003 }
2004 }
2005
2006 return a.TryMatchWordNRor(node);
2007}
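The (x & K1) | K2 => x | K2 rule needs K2 to supply a one in every position where K1 has a zero, i.e. K1 | K2 == -1. A small check with hypothetical constants (editor's sketch):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t K1 = 0xFFFF00FFu;
  const uint32_t K2 = 0x0000FF0Fu;   // has a one wherever K1 has a zero
  assert((K1 | K2) == 0xFFFFFFFFu);  // the side condition from the comment
  for (uint32_t i = 0; i <= 0xFFFFu; ++i) {
    uint32_t x = i * 2654435761u;
    assert(((x & K1) | K2) == (x | K2));
  }
  return 0;
}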
2008
2009Reduction MachineOperatorReducer::ReduceWord32Or(Node* node) {
2010 DCHECK_EQ(IrOpcode::kWord32Or, node->opcode())((void) 0);
2011 return ReduceWordNOr<Word32Adapter>(node);
2012}
2013
2014Reduction MachineOperatorReducer::ReduceWord64Or(Node* node) {
2015 DCHECK_EQ(IrOpcode::kWord64Or, node->opcode())((void) 0);
2016 return ReduceWordNOr<Word64Adapter>(node);
2017}
2018
2019template <typename WordNAdapter>
2020Reduction MachineOperatorReducer::ReduceWordNXor(Node* node) {
2021 using A = WordNAdapter;
2022 A a(this);
2023
2024 typename A::IntNBinopMatcher m(node);
2025 if (m.right().Is(0)) return Replace(m.left().node()); // x ^ 0 => x
2026 if (m.IsFoldable()) { // K ^ K => K (K stands for arbitrary constants)
2027 return a.ReplaceIntN(m.left().ResolvedValue() ^ m.right().ResolvedValue());
2028 }
2029 if (m.LeftEqualsRight()) return ReplaceInt32(0); // x ^ x => 0
2030 if (A::IsWordNXor(m.left()) && m.right().Is(-1)) {
2031 typename A::IntNBinopMatcher mleft(m.left().node());
2032 if (mleft.right().Is(-1)) { // (x ^ -1) ^ -1 => x
2033 return Replace(mleft.left().node());
2034 }
2035 }
2036
2037 return a.TryMatchWordNRor(node);
2038}
2039
2040Reduction MachineOperatorReducer::ReduceWord32Xor(Node* node) {
2041 DCHECK_EQ(IrOpcode::kWord32Xor, node->opcode())((void) 0);
2042 Int32BinopMatcher m(node);
2043 if (m.right().IsWord32Equal() && m.left().Is(1)) {
2044 return Replace(Word32Equal(m.right().node(), Int32Constant(0)));
2045 }
2046 return ReduceWordNXor<Word32Adapter>(node);
2047}
2048
2049Reduction MachineOperatorReducer::ReduceWord64Xor(Node* node) {
2050 DCHECK_EQ(IrOpcode::kWord64Xor, node->opcode())((void) 0);
2051 return ReduceWordNXor<Word64Adapter>(node);
2052}
2053
2054Reduction MachineOperatorReducer::ReduceWord32Equal(Node* node) {
2055 Int32BinopMatcher m(node);
2056 if (m.IsFoldable()) { // K == K => K (K stands for arbitrary constants)
2057 return ReplaceBool(m.left().ResolvedValue() == m.right().ResolvedValue());
2058 }
2059 if (m.left().IsInt32Sub() && m.right().Is(0)) { // x - y == 0 => x == y
2060 Int32BinopMatcher msub(m.left().node());
2061 node->ReplaceInput(0, msub.left().node());
2062 node->ReplaceInput(1, msub.right().node());
2063 return Changed(node);
2064 }
2065 // TODO(turbofan): fold HeapConstant, ExternalReference, pointer compares
2066 if (m.LeftEqualsRight()) return ReplaceBool(true); // x == x => true
2067 if (m.right().HasResolvedValue()) {
2068 base::Optional<std::pair<Node*, uint32_t>> replacements;
2069 if (m.left().IsTruncateInt64ToInt32()) {
2070 replacements = ReduceWord32EqualForConstantRhs<Word64Adapter>(
2071 NodeProperties::GetValueInput(m.left().node(), 0),
2072 static_cast<uint32_t>(m.right().ResolvedValue()));
2073 } else {
2074 replacements = ReduceWord32EqualForConstantRhs<Word32Adapter>(
2075 m.left().node(), static_cast<uint32_t>(m.right().ResolvedValue()));
2076 }
2077 if (replacements) {
2078 node->ReplaceInput(0, replacements->first);
2079 node->ReplaceInput(1, Uint32Constant(replacements->second));
2080 return Changed(node);
2081 }
2082 }
2083 // This is a workaround for not having escape analysis for wasm
2084 // (machine-level) turbofan graphs.
2085 if (!ObjectsMayAlias(m.left().node(), m.right().node())) {
2086 return ReplaceBool(false);
2087 }
2088
2089 return NoChange();
2090}
2091
2092Reduction MachineOperatorReducer::ReduceFloat64InsertLowWord32(Node* node) {
2093 DCHECK_EQ(IrOpcode::kFloat64InsertLowWord32, node->opcode())((void) 0);
2094 Float64Matcher mlhs(node->InputAt(0));
2095 Uint32Matcher mrhs(node->InputAt(1));
2096 if (mlhs.HasResolvedValue() && mrhs.HasResolvedValue()) {
2097 return ReplaceFloat64(
2098 bit_cast<double>((bit_cast<uint64_t>(mlhs.ResolvedValue()) &
2099 uint64_t{0xFFFFFFFF00000000}) |
2100 mrhs.ResolvedValue()));
2101 }
2102 return NoChange();
2103}
2104
2105Reduction MachineOperatorReducer::ReduceFloat64InsertHighWord32(Node* node) {
2106 DCHECK_EQ(IrOpcode::kFloat64InsertHighWord32, node->opcode())((void) 0);
2107 Float64Matcher mlhs(node->InputAt(0));
2108 Uint32Matcher mrhs(node->InputAt(1));
2109 if (mlhs.HasResolvedValue() && mrhs.HasResolvedValue()) {
2110 return ReplaceFloat64(bit_cast<double>(
2111 (bit_cast<uint64_t>(mlhs.ResolvedValue()) & uint64_t{0xFFFFFFFF}) |
2112 (static_cast<uint64_t>(mrhs.ResolvedValue()) << 32)));
2113 }
2114 return NoChange();
2115}
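Both insert-word reducers fold to a constant by splicing a 32-bit half into the 64-bit bit pattern of the double. A portable sketch of the same bit manipulation, using a memcpy-based stand-in for bit_cast (illustrative only):

#include <cassert>
#include <cstdint>
#include <cstring>

// Portable stand-in for the bit_cast used above.
template <typename To, typename From>
static To BitCast(const From& from) {
  static_assert(sizeof(To) == sizeof(From), "size mismatch");
  To to;
  std::memcpy(&to, &from, sizeof to);
  return to;
}

int main() {
  // Rebuild 1.0 (bit pattern 0x3FF0000000000000) from its 32-bit halves,
  // mirroring the constant folding in the two reducers above.
  const uint32_t low = 0x00000000u;
  const uint32_t high = 0x3FF00000u;
  double d = 0.0;
  d = BitCast<double>((BitCast<uint64_t>(d) & uint64_t{0xFFFFFFFF00000000}) | low);
  d = BitCast<double>((BitCast<uint64_t>(d) & uint64_t{0xFFFFFFFF}) |
                      (static_cast<uint64_t>(high) << 32));
  assert(d == 1.0);
  return 0;
}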
2116
2117namespace {
2118
2119bool IsFloat64RepresentableAsFloat32(const Float64Matcher& m) {
2120 if (m.HasResolvedValue()) {
2121 double v = m.ResolvedValue();
2122 return DoubleToFloat32(v) == v;
2123 }
2124 return false;
2125}
2126
2127} // namespace
2128
2129Reduction MachineOperatorReducer::ReduceFloat64Compare(Node* node) {
2130 DCHECK(IrOpcode::kFloat64Equal == node->opcode() ||((void) 0)
2131 IrOpcode::kFloat64LessThan == node->opcode() ||((void) 0)
2132 IrOpcode::kFloat64LessThanOrEqual == node->opcode())((void) 0);
2133 Float64BinopMatcher m(node);
2134 if (m.IsFoldable()) {
2135 switch (node->opcode()) {
2136 case IrOpcode::kFloat64Equal:
2137 return ReplaceBool(m.left().ResolvedValue() ==
2138 m.right().ResolvedValue());
2139 case IrOpcode::kFloat64LessThan:
2140 return ReplaceBool(m.left().ResolvedValue() <
2141 m.right().ResolvedValue());
2142 case IrOpcode::kFloat64LessThanOrEqual:
2143 return ReplaceBool(m.left().ResolvedValue() <=
2144 m.right().ResolvedValue());
2145 default:
2146 UNREACHABLE()V8_Fatal("unreachable code");
2147 }
2148 } else if ((m.left().IsChangeFloat32ToFloat64() &&
2149 m.right().IsChangeFloat32ToFloat64()) ||
2150 (m.left().IsChangeFloat32ToFloat64() &&
2151 IsFloat64RepresentableAsFloat32(m.right())) ||
2152 (IsFloat64RepresentableAsFloat32(m.left()) &&
2153 m.right().IsChangeFloat32ToFloat64())) {
2154 // As all Float32 values have an exact representation in Float64, comparing
2155 // two Float64 values both converted from Float32 is equivalent to comparing
2156 // the original Float32s, so we can ignore the conversions. We can also
2157 // reduce comparisons of converted Float64 values against constants that
2158 // can be represented exactly as Float32.
2159 switch (node->opcode()) {
2160 case IrOpcode::kFloat64Equal:
2161 NodeProperties::ChangeOp(node, machine()->Float32Equal());
2162 break;
2163 case IrOpcode::kFloat64LessThan:
2164 NodeProperties::ChangeOp(node, machine()->Float32LessThan());
2165 break;
2166 case IrOpcode::kFloat64LessThanOrEqual:
2167 NodeProperties::ChangeOp(node, machine()->Float32LessThanOrEqual());
2168 break;
2169 default:
2170 UNREACHABLE()V8_Fatal("unreachable code");
2171 }
2172 node->ReplaceInput(
2173 0, m.left().HasResolvedValue()
2174 ? Float32Constant(static_cast<float>(m.left().ResolvedValue()))
2175 : m.left().InputAt(0));
2176 node->ReplaceInput(
2177 1, m.right().HasResolvedValue()
2178 ? Float32Constant(static_cast<float>(m.right().ResolvedValue()))
2179 : m.right().InputAt(0));
2180 return Changed(node);
2181 }
2182 return NoChange();
2183}
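The justification in the comment (every Float32 value converts exactly to Float64) can be spot-checked outside the compiler; a minimal sketch using plain static_cast in place of V8's conversion operators and DoubleToFloat32:

#include <cassert>

int main() {
  // Float32 -> Float64 conversion is exact, so comparisons of the widened
  // values agree with comparisons of the originals.
  float a = 1.5f, b = 2.25f;
  assert((static_cast<double>(a) < static_cast<double>(b)) == (a < b));
  // A Float64 constant that round-trips through Float32 unchanged can be
  // narrowed on the other side of the comparison as well.
  double k = 0.5;  // exactly representable as Float32
  assert(static_cast<double>(static_cast<float>(k)) == k);
  assert((static_cast<double>(a) <= k) == (a <= static_cast<float>(k)));
  return 0;
}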
2184
2185Reduction MachineOperatorReducer::ReduceFloat64RoundDown(Node* node) {
2186 DCHECK_EQ(IrOpcode::kFloat64RoundDown, node->opcode())((void) 0);
2187 Float64Matcher m(node->InputAt(0));
2188 if (m.HasResolvedValue()) {
2189 return ReplaceFloat64(std::floor(m.ResolvedValue()));
2190 }
2191 return NoChange();
2192}
2193
2194namespace {
2195
2196// Returns true if |node| is a constant whose value is 0.
2197bool IsZero(Node* node) {
2198 switch (node->opcode()) {
2199#define CASE_IS_ZERO(opcode, matcher) \
2200 case IrOpcode::opcode: { \
2201 matcher m(node); \
2202 return m.Is(0); \
2203 }
2204 CASE_IS_ZERO(kInt32Constant, Int32Matcher)
2205 CASE_IS_ZERO(kInt64Constant, Int64Matcher)
2206#undef CASE_IS_ZERO
2207 default:
2208 break;
2209 }
2210 return false;
2211}
2212
2213// If |node| is of the form "x == 0", then return "x" (in order to remove the
2214// "== 0" part).
2215base::Optional<Node*> TryGetInvertedCondition(Node* cond) {
2216 if (cond->opcode() == IrOpcode::kWord32Equal) {
2217 Int32BinopMatcher m(cond);
2218 if (IsZero(m.right().node())) {
2219 return m.left().node();
2220 }
2221 }
2222 return base::nullopt;
2223}
2224
2225struct SimplifiedCondition {
2226 Node* condition;
2227 bool is_inverted;
2228};
2229
2230 // Tries to simplify |cond| by removing all top-level "== 0". Every time such a
2231 // construction is removed, the meaning of the comparison is inverted. This is
2232 // recorded in the variable |is_inverted| throughout this function, and returned
2233 // at the end. If |is_inverted| is true at the end, the caller should invert the
2234 // if/else branches following the comparison.
2235base::Optional<SimplifiedCondition> TrySimplifyCompareZero(Node* cond) {
2236 bool is_inverted = false;
2237 bool changed = false;
2238 base::Optional<Node*> new_cond;
2239 while ((new_cond = TryGetInvertedCondition(cond)).has_value()) {
2240 cond = *new_cond;
2241 is_inverted = !is_inverted;
2242 changed = true;
2243 }
2244 if (changed) {
2245 return SimplifiedCondition{cond, is_inverted};
2246 } else {
2247 return {};
2248 }
2249}
2250
2251} // namespace
2252
2253void MachineOperatorReducer::SwapBranches(Node* node) {
2254 DCHECK_EQ(node->opcode(), IrOpcode::kBranch)((void) 0);
2255 for (Node* const use : node->uses()) {
2256 switch (use->opcode()) {
2257 case IrOpcode::kIfTrue:
2258 NodeProperties::ChangeOp(use, common()->IfFalse());
2259 break;
2260 case IrOpcode::kIfFalse:
2261 NodeProperties::ChangeOp(use, common()->IfTrue());
2262 break;
2263 default:
2264 UNREACHABLE()V8_Fatal("unreachable code");
2265 }
2266 }
2267 NodeProperties::ChangeOp(
2268 node, common()->Branch(NegateBranchHint(BranchHintOf(node->op()))));
2269}
2270
2271// If |node| is a branch, removes all top-level 32-bit "== 0" from |node|.
2272Reduction MachineOperatorReducer::SimplifyBranch(Node* node) {
2273 Node* cond = node->InputAt(0);
2274 if (auto simplified = TrySimplifyCompareZero(cond)) {
2275 node->ReplaceInput(0, simplified->condition);
2276 if (simplified->is_inverted) {
2277 switch (node->opcode()) {
2278 case IrOpcode::kBranch:
2279 SwapBranches(node);
2280 break;
2281 case IrOpcode::kTrapIf:
2282 NodeProperties::ChangeOp(node,
2283 common()->TrapUnless(TrapIdOf(node->op())));
2284 break;
2285 case IrOpcode::kTrapUnless:
2286 NodeProperties::ChangeOp(node,
2287 common()->TrapIf(TrapIdOf(node->op())));
2288 break;
2289 case IrOpcode::kDeoptimizeIf: {
2290 DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
2291 NodeProperties::ChangeOp(
2292 node, common()->DeoptimizeUnless(p.reason(), p.feedback()));
2293 break;
2294 }
2295 case IrOpcode::kDeoptimizeUnless: {
2296 DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
2297 NodeProperties::ChangeOp(
2298 node, common()->DeoptimizeIf(p.reason(), p.feedback()));
2299 break;
2300 }
2301 default:
2302
2303 UNREACHABLE()V8_Fatal("unreachable code");
2304 }
2305 }
2306 return Changed(node);
2307 }
2308 return NoChange();
2309}
2310
2311Reduction MachineOperatorReducer::ReduceConditional(Node* node) {
2312 DCHECK(node->opcode() == IrOpcode::kBranch ||((void) 0)
2313 node->opcode() == IrOpcode::kDeoptimizeIf ||((void) 0)
2314 node->opcode() == IrOpcode::kDeoptimizeUnless ||((void) 0)
2315 node->opcode() == IrOpcode::kTrapIf ||((void) 0)
2316 node->opcode() == IrOpcode::kTrapUnless)((void) 0);
2317 // This reducer only applies operator reductions to the branch condition.
2318 // Reductions involving control flow happen elsewhere. Non-zero inputs are
2319 // considered true in all conditional ops.
2320 NodeMatcher condition(NodeProperties::GetValueInput(node, 0));
2321 Reduction reduction = NoChange();
2322 if (condition.IsTruncateInt64ToInt32()) {
2323 if (auto replacement =
2324 ReduceConditionalN<Word64Adapter>(condition.node())) {
2325 NodeProperties::ReplaceValueInput(node, *replacement, 0);
2326 reduction = Changed(node);
2327 }
2328 } else if (auto replacement = ReduceConditionalN<Word32Adapter>(node)) {
2329 NodeProperties::ReplaceValueInput(node, *replacement, 0);
2330 reduction = Changed(node);
2331 }
2332 return reduction.FollowedBy(SimplifyBranch(node));
2333}
2334
2335template <typename WordNAdapter>
2336base::Optional<Node*> MachineOperatorReducer::ReduceConditionalN(Node* node) {
2337 NodeMatcher condition(NodeProperties::GetValueInput(node, 0));
2338 // Branch conditions are 32-bit comparisons against zero, so they are the
2339 // opposite of a 32-bit `x == 0` node. To avoid repetition, we can reuse logic
2340 // for Word32Equal: if `x == 0` can reduce to `y == 0`, then branch(x) can
2341 // reduce to branch(y).
2342 auto replacements =
2343 ReduceWord32EqualForConstantRhs<WordNAdapter>(condition.node(), 0);
2344 if (replacements && replacements->second == 0) return replacements->first;
2345 return {};
2346}
2347
2348template <typename WordNAdapter>
2349base::Optional<std::pair<Node*, uint32_t>>
2350MachineOperatorReducer::ReduceWord32EqualForConstantRhs(Node* lhs,
2351 uint32_t rhs) {
2352 if (WordNAdapter::IsWordNAnd(NodeMatcher(lhs))) {
2353 typename WordNAdapter::UintNBinopMatcher mand(lhs);
2354 if ((WordNAdapter::IsWordNShr(mand.left()) ||
2355 WordNAdapter::IsWordNSar(mand.left())) &&
2356 mand.right().HasResolvedValue()) {
2357 typename WordNAdapter::UintNBinopMatcher mshift(mand.left().node());
2358 // ((x >> K1) & K2) == K3 => (x & (K2 << K1)) == (K3 << K1)
2359 if (mshift.right().HasResolvedValue()) {
2360 auto shift_bits = mshift.right().ResolvedValue();
2361 auto mask = mand.right().ResolvedValue();
2362 // Make sure that we won't shift data off the end, and that all of the
2363 // data ends up in the lower 32 bits for 64-bit mode.
2364 if (shift_bits <= base::bits::CountLeadingZeros(mask) &&
2365 shift_bits <= base::bits::CountLeadingZeros(rhs) &&
2366 mask << shift_bits <= std::numeric_limits<uint32_t>::max()) {
2367 Node* new_input = mshift.left().node();
2368 uint32_t new_mask = static_cast<uint32_t>(mask << shift_bits);
2369 uint32_t new_rhs = rhs << shift_bits;
2370 if (WordNAdapter::WORD_SIZE == 64) {
2371 // We can truncate before performing the And.
2372 new_input = TruncateInt64ToInt32(new_input);
2373 }
2374 return std::make_pair(Word32And(new_input, new_mask), new_rhs);
2375 }
2376 }
2377 }
2378 }
2379 // Replaces (x >> n) == k with x == k << n, with "k << n" being computed
2380 // here at compile time.
2381 if (lhs->op() == machine()->Word32SarShiftOutZeros() &&
2382 lhs->UseCount() == 1) {
2383 typename WordNAdapter::UintNBinopMatcher mshift(lhs);
2384 if (mshift.right().HasResolvedValue()) {
2385 int32_t shift = static_cast<int32_t>(mshift.right().ResolvedValue());
2386 if (CanRevertLeftShiftWithRightShift(rhs, shift)) {
2387 return std::make_pair(mshift.left().node(), rhs << shift);
2388 }
2389 }
2390 }
2391 return {};
2392}
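The ((x >> K1) & K2) == K3 => (x & (K2 << K1)) == (K3 << K1) rewrite is valid exactly when neither K2 nor K3 loses bits when shifted left by K1, which is what the CountLeadingZeros guards above establish. A brute-force check with hypothetical constants (editor's sketch):

#include <cassert>
#include <cstdint>

int main() {
  // Hypothetical constants: neither K2 << K1 nor K3 << K1 shifts bits away.
  const uint32_t K1 = 4, K2 = 0x0Fu, K3 = 0x05u;
  for (uint32_t x = 0; x <= 0xFFFFu; ++x) {
    bool original = ((x >> K1) & K2) == K3;
    bool rewritten = (x & (K2 << K1)) == (K3 << K1);
    assert(original == rewritten);
  }
  return 0;
}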
2393
2394CommonOperatorBuilder* MachineOperatorReducer::common() const {
2395 return mcgraph()->common();
2396}
2397
2398MachineOperatorBuilder* MachineOperatorReducer::machine() const {
2399 return mcgraph()->machine();
2400}
2401
2402Graph* MachineOperatorReducer::graph() const { return mcgraph()->graph(); }
2403
2404} // namespace compiler
2405} // namespace internal
2406} // namespace v8

../deps/v8/src/base/bits.h

1// Copyright 2014 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_BASE_BITS_H_
6#define V8_BASE_BITS_H_
7
8#include <stdint.h>
9#include <type_traits>
10
11#include "src/base/base-export.h"
12#include "src/base/macros.h"
13#if V8_CC_MSVC
14#include <intrin.h>
15#endif
16#if V8_OS_WIN32
17#include "src/base/win32-headers.h"
18#endif
19
20namespace v8 {
21namespace base {
22namespace bits {
23
24// CountPopulation(value) returns the number of bits set in |value|.
25template <typename T>
26constexpr inline
27 typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 8,
28 unsigned>::type
29 CountPopulation(T value) {
30 STATIC_ASSERT(sizeof(T) <= 8)static_assert(sizeof(T) <= 8, "sizeof(T) <= 8");
31#if V8_HAS_BUILTIN_POPCOUNT(1)
32 return sizeof(T) == 8 ? __builtin_popcountll(static_cast<uint64_t>(value))
33 : __builtin_popcount(static_cast<uint32_t>(value));
34#else
35 // Fall back to divide-and-conquer popcount (see "Hacker's Delight" by Henry
36 // S. Warren, Jr.), chapter 5-1.
37 constexpr uint64_t mask[] = {0x5555555555555555, 0x3333333333333333,
38 0x0f0f0f0f0f0f0f0f};
39 // Start with 64 buckets of 1 bits, holding values from [0,1].
40 value = ((value >> 1) & mask[0]) + (value & mask[0]);
41 // Having 32 buckets of 2 bits, holding values from [0,2] now.
42 value = ((value >> 2) & mask[1]) + (value & mask[1]);
43 // Having 16 buckets of 4 bits, holding values from [0,4] now.
44 value = ((value >> 4) & mask[2]) + (value & mask[2]);
45 // Having 8 buckets of 8 bits, holding values from [0,8] now.
46 // From this point on, the buckets are bigger than the number of bits
47 // required to hold the values, and the buckets are bigger than the maximum
48 // result, so there's no need to mask value anymore, since there's no
49 // more risk of overflow between buckets.
50 if (sizeof(T) > 1) value = (value >> (sizeof(T) > 1 ? 8 : 0)) + value;
51 // Having 4 buckets of 16 bits, holding values from [0,16] now.
52 if (sizeof(T) > 2) value = (value >> (sizeof(T) > 2 ? 16 : 0)) + value;
53 // Having 2 buckets of 32 bits, holding values from [0,32] now.
54 if (sizeof(T) > 4) value = (value >> (sizeof(T) > 4 ? 32 : 0)) + value;
55 // Having 1 buckets of 64 bits, holding values from [0,64] now.
56 return static_cast<unsigned>(value & 0xff);
57#endif
58}
59
60// ReverseBits(value) returns |value| in reverse bit order.
61template <typename T>
62T ReverseBits(T value) {
63 STATIC_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
64 (sizeof(value) == 4) || (sizeof(value) == 8));
65 T result = 0;
66 for (unsigned i = 0; i < (sizeof(value) * 8); i++) {
67 result = (result << 1) | (value & 1);
68 value >>= 1;
69 }
70 return result;
71}
72
73// CountLeadingZeros(value) returns the number of zero bits above the most
74// significant 1 bit in |value| if |value| is non-zero, otherwise it returns
75// {sizeof(T) * 8}.
76template <typename T, unsigned bits = sizeof(T) * 8>
77inline constexpr
78 typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 8,
79 unsigned>::type
80 CountLeadingZeros(T value) {
81 static_assert(bits > 0, "invalid instantiation");
82#if V8_HAS_BUILTIN_CLZ(1)
83 return value == 0
84 ? bits
85 : bits == 64
86 ? __builtin_clzll(static_cast<uint64_t>(value))
87 : __builtin_clz(static_cast<uint32_t>(value)) - (32 - bits);
88#else
89 // Binary search algorithm taken from "Hacker's Delight" (by Henry S. Warren,
90 // Jr.), figures 5-11 and 5-12.
91 if (bits == 1) return static_cast<unsigned>(value) ^ 1;
92 T upper_half = value >> (bits / 2);
93 T next_value = upper_half != 0 ? upper_half : value;
94 unsigned add = upper_half != 0 ? 0 : bits / 2;
95 constexpr unsigned next_bits = bits == 1 ? 1 : bits / 2;
96 return CountLeadingZeros<T, next_bits>(next_value) + add;
97#endif
98}
99
100inline constexpr unsigned CountLeadingZeros32(uint32_t value) {
101 return CountLeadingZeros(value);
102}
103inline constexpr unsigned CountLeadingZeros64(uint64_t value) {
104 return CountLeadingZeros(value);
105}
106
107// CountTrailingZeros(value) returns the number of zero bits below the
108// least significant 1 bit in |value| if |value| is non-zero, otherwise it
109// returns {sizeof(T) * 8}.
110// See CountTrailingZerosNonZero for an optimized version for the case that
111// |value| is guaranteed to be non-zero.
112template <typename T, unsigned bits = sizeof(T) * 8>
113inline constexpr
114 typename std::enable_if<std::is_integral<T>::value && sizeof(T) <= 8,
115 unsigned>::type
116 CountTrailingZeros(T value) {
117#if V8_HAS_BUILTIN_CTZ(1)
118 return value == 0 ? bits
2: Assuming 'value' is equal to 0
3: '?' condition is true
4: Returning the value 32
119 : bits == 64 ? __builtin_ctzll(static_cast<uint64_t>(value))
120 : __builtin_ctz(static_cast<uint32_t>(value));
121#else
122 // Fall back to popcount (see "Hacker's Delight" by Henry S. Warren, Jr.),
123 // chapter 5-4. On x64, since it is faster than counting in a loop and faster
124 // than doing binary search.
125 using U = typename std::make_unsigned<T>::type;
126 U u = value;
127 return CountPopulation(static_cast<U>(~u & (u - 1u)));
128#endif
129}
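This is the function on the reported path: for |value| == 0 it returns the full bit width (32 in the 32-bit instantiation), and a caller that feeds that result straight into a shift performs a shift by 32, which is undefined for uint32_t. An illustrative sketch of the hazard and of one possible guard (not the fix applied in V8):

#include <cstdint>

// Portable stand-in for the 32-bit case of CountTrailingZeros above.
static unsigned CountTrailingZeros32(uint32_t value) {
  if (value == 0) return 32;  // the branch the analyzer follows in the report
  unsigned n = 0;
  while ((value & 1u) == 0) {
    value >>= 1;
    ++n;
  }
  return n;
}

uint32_t LowestSetBitUnsafe(uint32_t mask) {
  return 1u << CountTrailingZeros32(mask);  // UB when mask == 0: shift count is 32
}

uint32_t LowestSetBitSafe(uint32_t mask) {
  unsigned shift = CountTrailingZeros32(mask);
  return shift < 32 ? (1u << shift) : 0u;   // handle the zero case explicitly
}

int main() {
  return LowestSetBitSafe(0u) + (LowestSetBitSafe(40u) == 8u ? 0 : 1);  // exits 0
}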
130
131inline constexpr unsigned CountTrailingZeros32(uint32_t value) {
132 return CountTrailingZeros(value);
133}
134inline constexpr unsigned CountTrailingZeros64(uint64_t value) {
135 return CountTrailingZeros(value);
136}
137
138// CountTrailingZerosNonZero(value) returns the number of zero bits preceding
139// the least significant 1 bit in |value| if |value| is non-zero, otherwise the
140// behavior is undefined.
141// See CountTrailingZeros for an alternative version that allows |value| == 0.
142template <typename T, unsigned bits = sizeof(T) * 8>
143inline constexpr
144 typename std::enable_if<std::is_integral<T>::value && sizeof(T) <= 8,
145 unsigned>::type
146 CountTrailingZerosNonZero(T value) {
147 DCHECK_NE(0, value)((void) 0);
148#if V8_HAS_BUILTIN_CTZ(1)
149 return bits == 64 ? __builtin_ctzll(static_cast<uint64_t>(value))
150 : __builtin_ctz(static_cast<uint32_t>(value));
151#else
152 return CountTrailingZeros<T, bits>(value);
153#endif
154}
155
156// Returns true iff |value| is a power of 2.
157template <typename T,
158 typename = typename std::enable_if<std::is_integral<T>::value ||
159 std::is_enum<T>::value>::type>
160constexpr inline bool IsPowerOfTwo(T value) {
161 return value > 0 && (value & (value - 1)) == 0;
162}
163
164// Identical to {CountTrailingZeros}, but only works for powers of 2.
165template <typename T,
166 typename = typename std::enable_if<std::is_integral<T>::value>::type>
167inline constexpr int WhichPowerOfTwo(T value) {
168 DCHECK(IsPowerOfTwo(value))((void) 0);
169#if V8_HAS_BUILTIN_CTZ(1)
170 STATIC_ASSERT(sizeof(T) <= 8)static_assert(sizeof(T) <= 8, "sizeof(T) <= 8");
171 return sizeof(T) == 8 ? __builtin_ctzll(static_cast<uint64_t>(value))
172 : __builtin_ctz(static_cast<uint32_t>(value));
173#else
174 // Fall back to popcount (see "Hacker's Delight" by Henry S. Warren, Jr.),
175 // chapter 5-4. On x64, since it is faster than counting in a loop and faster
176 // than doing binary search.
177 using U = typename std::make_unsigned<T>::type;
178 U u = value;
179 return CountPopulation(static_cast<U>(u - 1));
180#endif
181}
182
183// RoundUpToPowerOfTwo32(value) returns the smallest power of two which is
184// greater than or equal to |value|. If you pass in a |value| that is already a
185// power of two, it is returned as is. |value| must be less than or equal to
186// 0x80000000u. Uses computation based on leading zeros if we have compiler
187// support for that. Falls back to the implementation from "Hacker's Delight" by
188// Henry S. Warren, Jr., figure 3-3, page 48, where the function is called clp2.
189V8_BASE_EXPORT uint32_t RoundUpToPowerOfTwo32(uint32_t value);
190// Same for 64 bit integers. |value| must be <= 2^63
191V8_BASE_EXPORT uint64_t RoundUpToPowerOfTwo64(uint64_t value);
192// Same for size_t integers.
193inline size_t RoundUpToPowerOfTwo(size_t value) {
194 if (sizeof(size_t) == sizeof(uint64_t)) {
195 return RoundUpToPowerOfTwo64(value);
196 } else {
197 // Without windows.h included this line triggers a truncation warning on
198 // 64-bit builds. Presumably windows.h disables the relevant warning.
199 return RoundUpToPowerOfTwo32(static_cast<uint32_t>(value));
200 }
201}
202
203// RoundDownToPowerOfTwo32(value) returns the greatest power of two which is
204// less than or equal to |value|. If you pass in a |value| that is already a
205// power of two, it is returned as is.
206inline uint32_t RoundDownToPowerOfTwo32(uint32_t value) {
207 if (value > 0x80000000u) return 0x80000000u;
208 uint32_t result = RoundUpToPowerOfTwo32(value);
209 if (result > value) result >>= 1;
210 return result;
211}
212
213
214// Precondition: 0 <= shift < 32
215inline constexpr uint32_t RotateRight32(uint32_t value, uint32_t shift) {
216 return (value >> shift) | (value << ((32 - shift) & 31));
217}
218
219// Precondition: 0 <= shift < 32
220inline constexpr uint32_t RotateLeft32(uint32_t value, uint32_t shift) {
221 return (value << shift) | (value >> ((32 - shift) & 31));
222}
223
224// Precondition: 0 <= shift < 64
225inline constexpr uint64_t RotateRight64(uint64_t value, uint64_t shift) {
226 return (value >> shift) | (value << ((64 - shift) & 63));
227}
228
229// Precondition: 0 <= shift < 64
230inline constexpr uint64_t RotateLeft64(uint64_t value, uint64_t shift) {
231 return (value << shift) | (value >> ((64 - shift) & 63));
232}
233
234// SignedAddOverflow32(lhs,rhs,val) performs a signed summation of |lhs| and
235// |rhs| and stores the result into the variable pointed to by |val| and
236// returns true if the signed summation resulted in an overflow.
237inline bool SignedAddOverflow32(int32_t lhs, int32_t rhs, int32_t* val) {
238#if V8_HAS_BUILTIN_SADD_OVERFLOW(1)
239 return __builtin_sadd_overflow(lhs, rhs, val);
240#else
241 uint32_t res = static_cast<uint32_t>(lhs) + static_cast<uint32_t>(rhs);
242 *val = bit_cast<int32_t>(res);
243 return ((res ^ lhs) & (res ^ rhs) & (1U << 31)) != 0;
244#endif
245}
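The fallback branch detects signed overflow by checking whether the result's sign differs from the sign of both operands. A standalone sketch of the same trick with a few sanity checks (editor's sketch, not V8 code):

#include <cassert>
#include <cstdint>

// Signed overflow occurred iff the result's sign differs from both operands' signs.
static bool AddOverflows32(int32_t lhs, int32_t rhs, int32_t* val) {
  uint32_t res = static_cast<uint32_t>(lhs) + static_cast<uint32_t>(rhs);
  *val = static_cast<int32_t>(res);
  return ((res ^ static_cast<uint32_t>(lhs)) &
          (res ^ static_cast<uint32_t>(rhs)) & (1u << 31)) != 0;
}

int main() {
  int32_t out;
  assert(AddOverflows32(INT32_MAX, 1, &out));                       // wraps past INT32_MAX
  assert(!AddOverflows32(INT32_MAX, -1, &out) && out == INT32_MAX - 1);
  assert(AddOverflows32(INT32_MIN, -1, &out));                      // wraps past INT32_MIN
  return 0;
}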
246
247
248// SignedSubOverflow32(lhs,rhs,val) performs a signed subtraction of |lhs| and
249// |rhs| and stores the result into the variable pointed to by |val| and
250// returns true if the signed subtraction resulted in an overflow.
251inline bool SignedSubOverflow32(int32_t lhs, int32_t rhs, int32_t* val) {
252#if V8_HAS_BUILTIN_SSUB_OVERFLOW(1)
253 return __builtin_ssub_overflow(lhs, rhs, val);
254#else
255 uint32_t res = static_cast<uint32_t>(lhs) - static_cast<uint32_t>(rhs);
256 *val = bit_cast<int32_t>(res);
257 return ((res ^ lhs) & (res ^ ~rhs) & (1U << 31)) != 0;
258#endif
259}
260
261// SignedMulOverflow32(lhs,rhs,val) performs a signed multiplication of |lhs|
262// and |rhs| and stores the result into the variable pointed to by |val| and
263// returns true if the signed multiplication resulted in an overflow.
264V8_BASE_EXPORT bool SignedMulOverflow32(int32_t lhs, int32_t rhs, int32_t* val);
265
266// SignedAddOverflow64(lhs,rhs,val) performs a signed summation of |lhs| and
267// |rhs| and stores the result into the variable pointed to by |val| and
268// returns true if the signed summation resulted in an overflow.
269inline bool SignedAddOverflow64(int64_t lhs, int64_t rhs, int64_t* val) {
270 uint64_t res = static_cast<uint64_t>(lhs) + static_cast<uint64_t>(rhs);
271 *val = bit_cast<int64_t>(res);
272 return ((res ^ lhs) & (res ^ rhs) & (1ULL << 63)) != 0;
273}
274
275
276// SignedSubOverflow64(lhs,rhs,val) performs a signed subtraction of |lhs| and
277// |rhs| and stores the result into the variable pointed to by |val| and
278// returns true if the signed subtraction resulted in an overflow.
279inline bool SignedSubOverflow64(int64_t lhs, int64_t rhs, int64_t* val) {
280 uint64_t res = static_cast<uint64_t>(lhs) - static_cast<uint64_t>(rhs);
281 *val = bit_cast<int64_t>(res);
282 return ((res ^ lhs) & (res ^ ~rhs) & (1ULL << 63)) != 0;
283}
284
285// SignedMulHigh32(lhs, rhs) multiplies two signed 32-bit values |lhs| and
286// |rhs|, extracts the most significant 32 bits of the result, and returns
287// those.
288V8_BASE_EXPORT int32_t SignedMulHigh32(int32_t lhs, int32_t rhs);
289
290// SignedMulHighAndAdd32(lhs, rhs, acc) multiplies two signed 32-bit values
291// |lhs| and |rhs|, extracts the most significant 32 bits of the result, and
292// adds the accumulate value |acc|.
293V8_BASE_EXPORT int32_t SignedMulHighAndAdd32(int32_t lhs, int32_t rhs,
294 int32_t acc);
295
296// SignedDiv32(lhs, rhs) divides |lhs| by |rhs| and returns the quotient
297// truncated to int32. If |rhs| is zero, then zero is returned. If |lhs|
298// is minint and |rhs| is -1, it returns minint.
299V8_BASE_EXPORT int32_t SignedDiv32(int32_t lhs, int32_t rhs);
300
301// SignedMod32(lhs, rhs) divides |lhs| by |rhs| and returns the remainder
302// truncated to int32. If either |rhs| is zero or |lhs| is minint and |rhs|
303// is -1, it returns zero.
304V8_BASE_EXPORT int32_t SignedMod32(int32_t lhs, int32_t rhs);
305
306// UnsignedAddOverflow32(lhs,rhs,val) performs an unsigned summation of |lhs|
307// and |rhs| and stores the result into the variable pointed to by |val| and
308// returns true if the unsigned summation resulted in an overflow.
309inline bool UnsignedAddOverflow32(uint32_t lhs, uint32_t rhs, uint32_t* val) {
310#if V8_HAS_BUILTIN_SADD_OVERFLOW(1)
311 return __builtin_uadd_overflow(lhs, rhs, val);
312#else
313 *val = lhs + rhs;
314 return *val < (lhs | rhs);
315#endif
316}
317
318
319// UnsignedDiv32(lhs, rhs) divides |lhs| by |rhs| and returns the quotient
320// truncated to uint32. If |rhs| is zero, then zero is returned.
321inline uint32_t UnsignedDiv32(uint32_t lhs, uint32_t rhs) {
322 return rhs ? lhs / rhs : 0u;
323}
324
325
326// UnsignedMod32(lhs, rhs) divides |lhs| by |rhs| and returns the remainder
327// truncated to uint32. If |rhs| is zero, then zero is returned.
328inline uint32_t UnsignedMod32(uint32_t lhs, uint32_t rhs) {
329 return rhs ? lhs % rhs : 0u;
330}
331
332// Wraparound integer arithmetic without undefined behavior.
333
334inline int32_t WraparoundAdd32(int32_t lhs, int32_t rhs) {
335 return static_cast<int32_t>(static_cast<uint32_t>(lhs) +
336 static_cast<uint32_t>(rhs));
337}
338
339inline int32_t WraparoundNeg32(int32_t x) {
340 return static_cast<int32_t>(-static_cast<uint32_t>(x));
341}
342
343// SignedSaturatedAdd64(lhs, rhs) adds |lhs| and |rhs|, saturating the result
344// to the int64_t range on overflow, and returns it.
345V8_BASE_EXPORT int64_t SignedSaturatedAdd64(int64_t lhs, int64_t rhs);
346
347// SignedSaturatedSub64(lhs, rhs) subtracts |rhs| from |lhs|, saturating the
348// result to the int64_t range on overflow, and returns it.
349V8_BASE_EXPORT int64_t SignedSaturatedSub64(int64_t lhs, int64_t rhs);
350
351} // namespace bits
352} // namespace base
353} // namespace v8
354
355#endif // V8_BASE_BITS_H_