Bug Summary

File: out/../deps/v8/src/utils/allocation.cc
Warning: line 311, column 36
Called C++ object pointer is null
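
The defect path the analyzer reports (the numbered steps in the annotated source below) runs through VirtualMemoryCage::InitReservation: a temporary VirtualMemory is move-assigned into reservation_ (step 16), the move assignment calls Reset() on the temporary, which stores null into its page_allocator_ field (steps 17-18), and the temporary's destructor then reaches VirtualMemory::Free() (steps 21-23), where the saved page_allocator pointer is used at allocation.cc:311 (steps 24-25). The condensed sketch below only illustrates that sequence; SimplePageAllocator and SimpleVM are hypothetical stand-ins, not the real V8 classes, and the null check in Free() stands in for the call the analyzer flags.

// Condensed, hypothetical sketch of the reported path (steps 16-25 below).
#include <cstddef>
#include <cstdio>

struct SimplePageAllocator {
  std::size_t AllocatePageSize() const { return 4096; }
};

class SimpleVM {
 public:
  SimpleVM() = default;
  SimpleVM(SimplePageAllocator* allocator, std::size_t size)
      : page_allocator_(allocator), size_(size) {}

  // Mirrors VirtualMemory's move assignment: copy the fields, then Reset()
  // the source object (step 17), which nulls its page_allocator_ (step 18).
  SimpleVM& operator=(SimpleVM&& other) noexcept {
    page_allocator_ = other.page_allocator_;
    size_ = other.size_;
    other.Reset();
    return *this;
  }

  void Reset() {
    page_allocator_ = nullptr;  // step 18 in the report
    size_ = 0;
  }

  void Free() {
    // Steps 24-25: on the analyzer's path the saved pointer is null; the real
    // code calls page_allocator->AllocatePageSize() here (allocation.cc:311).
    SimplePageAllocator* allocator = page_allocator_;
    if (allocator == nullptr) {
      std::puts("page_allocator_ is null at the flagged call site");
      return;
    }
    std::printf("AllocatePageSize() = %zu\n", allocator->AllocatePageSize());
  }

  ~SimpleVM() {
    // The real destructor guards this with IsReserved(); the analyzer's path
    // takes that branch as true (step 22) and calls Free() (step 23).
    Free();
  }

 private:
  SimplePageAllocator* page_allocator_ = nullptr;
  std::size_t size_ = 0;
};

int main() {
  SimplePageAllocator allocator;
  SimpleVM reservation;
  // Step 16: InitReservation move-assigns a temporary VirtualMemory into
  // reservation_; the temporary is destroyed right afterwards (step 21).
  reservation = SimpleVM(&allocator, 1u << 20);
  return 0;
}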

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-unknown-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name allocation.cc -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 2 -pic-is-pie -mframe-pointer=all -relaxed-aliasing -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/home/maurizio/node-v18.6.0/out -resource-dir /usr/local/lib/clang/16.0.0 -D _GLIBCXX_USE_CXX11_ABI=1 -D NODE_OPENSSL_CONF_NAME=nodejs_conf -D NODE_OPENSSL_HAS_QUIC -D V8_GYP_BUILD -D V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP=64 -D __STDC_FORMAT_MACROS -D OPENSSL_NO_PINSHARED -D OPENSSL_THREADS -D V8_TARGET_ARCH_X64 -D V8_HAVE_TARGET_OS -D V8_TARGET_OS_LINUX -D V8_EMBEDDER_STRING="-node.8" -D ENABLE_DISASSEMBLER -D V8_PROMISE_INTERNAL_FIELD_COUNT=1 -D V8_SHORT_BUILTIN_CALLS -D OBJECT_PRINT -D V8_INTL_SUPPORT -D V8_ATOMIC_OBJECT_FIELD_WRITES -D V8_ENABLE_LAZY_SOURCE_POSITIONS -D V8_USE_SIPHASH -D V8_SHARED_RO_HEAP -D V8_WIN64_UNWINDING_INFO -D V8_ENABLE_REGEXP_INTERPRETER_THREADED_DISPATCH -D V8_SNAPSHOT_COMPRESSION -D V8_ENABLE_WEBASSEMBLY -D V8_ENABLE_JAVASCRIPT_PROMISE_HOOKS -D V8_ALLOCATION_FOLDING -D V8_ALLOCATION_SITE_TRACKING -D V8_SCRIPTORMODULE_LEGACY_LIFETIME -D V8_ADVANCED_BIGINT_ALGORITHMS -D ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC -D UCONFIG_NO_SERVICE=1 -D U_ENABLE_DYLOAD=0 -D U_STATIC_IMPLEMENTATION=1 -D U_HAVE_STD_STRING=1 -D UCONFIG_NO_BREAK_ITERATION=0 -I ../deps/v8 -I ../deps/v8/include -I /home/maurizio/node-v18.6.0/out/Release/obj/gen/inspector-generated-output-root -I ../deps/v8/third_party/inspector_protocol -I /home/maurizio/node-v18.6.0/out/Release/obj/gen -I /home/maurizio/node-v18.6.0/out/Release/obj/gen/generate-bytecode-output-root -I ../deps/icu-small/source/i18n -I ../deps/icu-small/source/common -I ../deps/v8/third_party/zlib -I ../deps/v8/third_party/zlib/google -internal-isystem /usr/lib/gcc/x86_64-redhat-linux/8/../../../../include/c++/8 -internal-isystem /usr/lib/gcc/x86_64-redhat-linux/8/../../../../include/c++/8/x86_64-redhat-linux -internal-isystem /usr/lib/gcc/x86_64-redhat-linux/8/../../../../include/c++/8/backward -internal-isystem /usr/local/lib/clang/16.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-redhat-linux/8/../../../../x86_64-redhat-linux/include -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -Wno-unused-parameter -Wno-return-type -std=gnu++17 -fdeprecated-macro -fdebug-compilation-dir=/home/maurizio/node-v18.6.0/out -ferror-limit 19 -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-08-22-142216-507842-1 -x c++ ../deps/v8/src/utils/allocation.cc

../deps/v8/src/utils/allocation.cc

1// Copyright 2012 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/utils/allocation.h"
6
7#include <stdlib.h> // For free, malloc.
8
9#include "src/base/bits.h"
10#include "src/base/bounded-page-allocator.h"
11#include "src/base/lazy-instance.h"
12#include "src/base/logging.h"
13#include "src/base/page-allocator.h"
14#include "src/base/platform/platform.h"
15#include "src/base/platform/wrappers.h"
16#include "src/base/sanitizer/lsan-page-allocator.h"
17#include "src/base/sanitizer/lsan-virtual-address-space.h"
18#include "src/base/vector.h"
19#include "src/base/virtual-address-space.h"
20#include "src/flags/flags.h"
21#include "src/init/v8.h"
22#include "src/sandbox/sandbox.h"
23#include "src/utils/memcopy.h"
24
25#if V8_LIBC_BIONIC
26#include <malloc.h>
27#endif
28
29namespace v8 {
30namespace internal {
31
32namespace {
33
34void* AlignedAllocInternal(size_t size, size_t alignment) {
35 void* ptr;
36#if V8_OS_WIN
37 ptr = _aligned_malloc(size, alignment);
38#elif V8_LIBC_BIONIC
39 // posix_memalign is not exposed in some Android versions, so we fall back to
40 // memalign. See http://code.google.com/p/android/issues/detail?id=35391.
41 ptr = memalign(alignment, size);
42#elif V8_OS_STARBOARD
43 ptr = SbMemoryAllocateAligned(alignment, size);
44#else
45 if (posix_memalign(&ptr, alignment, size)) ptr = nullptr;
46#endif
47 return ptr;
48}
49
50class PageAllocatorInitializer {
51 public:
52 PageAllocatorInitializer() {
53 page_allocator_ = V8::GetCurrentPlatform()->GetPageAllocator();
54 if (page_allocator_ == nullptr) {
55 static base::LeakyObject<base::PageAllocator> default_page_allocator;
56 page_allocator_ = default_page_allocator.get();
57 }
58#if defined(LEAK_SANITIZER)
59 static base::LeakyObject<base::LsanPageAllocator> lsan_allocator(
60 page_allocator_);
61 page_allocator_ = lsan_allocator.get();
62#endif
63 }
64
65 PageAllocator* page_allocator() const { return page_allocator_; }
66
67 void SetPageAllocatorForTesting(PageAllocator* allocator) {
68 page_allocator_ = allocator;
69 }
70
71 private:
72 PageAllocator* page_allocator_;
73};
74
75DEFINE_LAZY_LEAKY_OBJECT_GETTER(PageAllocatorInitializer,
76 GetPageAllocatorInitializer)
77
78// We will attempt allocation this many times. After each failure, we call
79// OnCriticalMemoryPressure to try to free some memory.
80const int kAllocationTries = 2;
81
82} // namespace
83
84v8::PageAllocator* GetPlatformPageAllocator() {
85 DCHECK_NOT_NULL(GetPageAllocatorInitializer()->page_allocator());
86 return GetPageAllocatorInitializer()->page_allocator();
87}
88
89v8::VirtualAddressSpace* GetPlatformVirtualAddressSpace() {
90#if defined(LEAK_SANITIZER)
91 static base::LeakyObject<base::LsanVirtualAddressSpace> vas(
92 std::make_unique<base::VirtualAddressSpace>());
93#else
94 static base::LeakyObject<base::VirtualAddressSpace> vas;
95#endif
96 return vas.get();
97}
98
99#ifdef V8_SANDBOX
100v8::PageAllocator* GetSandboxPageAllocator() {
101 // TODO(chromium:1218005) remove this code once the cage is no longer
102 // optional.
103 if (GetProcessWideSandbox()->is_disabled()) {
104 return GetPlatformPageAllocator();
105 } else {
106 CHECK(GetProcessWideSandbox()->is_initialized());
107 return GetProcessWideSandbox()->page_allocator();
108 }
109}
110#endif
111
112v8::PageAllocator* SetPlatformPageAllocatorForTesting(
113 v8::PageAllocator* new_page_allocator) {
114 v8::PageAllocator* old_page_allocator = GetPlatformPageAllocator();
115 GetPageAllocatorInitializer()->SetPageAllocatorForTesting(new_page_allocator);
116 return old_page_allocator;
117}
118
119void* Malloced::operator new(size_t size) {
120 void* result = AllocWithRetry(size);
121 if (result == nullptr) {
122 V8::FatalProcessOutOfMemory(nullptr, "Malloced operator new");
123 }
124 return result;
125}
126
127void Malloced::operator delete(void* p) { base::Free(p); }
128
129char* StrDup(const char* str) {
130 size_t length = strlen(str);
131 char* result = NewArray<char>(length + 1);
132 MemCopy(result, str, length);
133 result[length] = '\0';
134 return result;
135}
136
137char* StrNDup(const char* str, size_t n) {
138 size_t length = strlen(str);
139 if (n < length) length = n;
140 char* result = NewArray<char>(length + 1);
141 MemCopy(result, str, length);
142 result[length] = '\0';
143 return result;
144}
145
146void* AllocWithRetry(size_t size, MallocFn malloc_fn) {
147 void* result = nullptr;
148 for (int i = 0; i < kAllocationTries; ++i) {
149 result = malloc_fn(size);
150 if (result != nullptr) break;
151 if (!OnCriticalMemoryPressure(size)) break;
152 }
153 return result;
154}
155
156void* AlignedAlloc(size_t size, size_t alignment) {
157 DCHECK_LE(alignof(void*), alignment);
158 DCHECK(base::bits::IsPowerOfTwo(alignment));
159 void* result = nullptr;
160 for (int i = 0; i < kAllocationTries; ++i) {
161 result = AlignedAllocInternal(size, alignment);
162 if (result != nullptr) break;
163 if (!OnCriticalMemoryPressure(size + alignment)) break;
164 }
165 if (result == nullptr) {
166 V8::FatalProcessOutOfMemory(nullptr, "AlignedAlloc");
167 }
168 return result;
169}
170
171void AlignedFree(void* ptr) {
172#if V8_OS_WIN
173 _aligned_free(ptr);
174#elif V8_LIBC_BIONIC
175 // Using free is not correct in general, but for V8_LIBC_BIONIC it is.
176 base::Free(ptr);
177#elif V8_OS_STARBOARD
178 SbMemoryFreeAligned(ptr);
179#else
180 base::Free(ptr);
181#endif
182}
183
184size_t AllocatePageSize() {
185 return GetPlatformPageAllocator()->AllocatePageSize();
186}
187
188size_t CommitPageSize() { return GetPlatformPageAllocator()->CommitPageSize(); }
189
190void SetRandomMmapSeed(int64_t seed) {
191 GetPlatformPageAllocator()->SetRandomMmapSeed(seed);
192}
193
194void* GetRandomMmapAddr() {
195 return GetPlatformPageAllocator()->GetRandomMmapAddr();
196}
197
198void* AllocatePages(v8::PageAllocator* page_allocator, void* hint, size_t size,
199 size_t alignment, PageAllocator::Permission access) {
200 DCHECK_NOT_NULL(page_allocator);
201 DCHECK_EQ(hint, AlignedAddress(hint, alignment));
202 DCHECK(IsAligned(size, page_allocator->AllocatePageSize()));
203 if (FLAG_randomize_all_allocations) {
204 hint = AlignedAddress(page_allocator->GetRandomMmapAddr(), alignment);
205 }
206 void* result = nullptr;
207 for (int i = 0; i < kAllocationTries; ++i) {
208 result = page_allocator->AllocatePages(hint, size, alignment, access);
209 if (result != nullptr) break;
210 size_t request_size = size + alignment - page_allocator->AllocatePageSize();
211 if (!OnCriticalMemoryPressure(request_size)) break;
212 }
213 return result;
214}
215
216void FreePages(v8::PageAllocator* page_allocator, void* address,
217 const size_t size) {
218 DCHECK_NOT_NULL(page_allocator);
219 DCHECK(IsAligned(size, page_allocator->AllocatePageSize()));
220 CHECK(page_allocator->FreePages(address, size));
221}
222
223void ReleasePages(v8::PageAllocator* page_allocator, void* address, size_t size,
224 size_t new_size) {
225 DCHECK_NOT_NULL(page_allocator);
226 DCHECK_LT(new_size, size);
227 DCHECK(IsAligned(new_size, page_allocator->CommitPageSize()));
228 CHECK(page_allocator->ReleasePages(address, size, new_size));
229}
230
231bool SetPermissions(v8::PageAllocator* page_allocator, void* address,
232 size_t size, PageAllocator::Permission access) {
233 DCHECK_NOT_NULL(page_allocator);
234 return page_allocator->SetPermissions(address, size, access);
235}
236
237bool OnCriticalMemoryPressure(size_t length) {
238 // TODO(bbudge) Rework retry logic once embedders implement the more
239 // informative overload.
240 if (!V8::GetCurrentPlatform()->OnCriticalMemoryPressure(length)) {
241 V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
242 }
243 return true;
244}
245
246VirtualMemory::VirtualMemory() = default;
247
248VirtualMemory::VirtualMemory(v8::PageAllocator* page_allocator, size_t size,
249 void* hint, size_t alignment, JitPermission jit)
250 : page_allocator_(page_allocator) {
251 DCHECK_NOT_NULL(page_allocator);
252 DCHECK(IsAligned(size, page_allocator_->CommitPageSize()));
253 size_t page_size = page_allocator_->AllocatePageSize();
254 alignment = RoundUp(alignment, page_size);
255 PageAllocator::Permission permissions =
256 jit == kMapAsJittable ? PageAllocator::kNoAccessWillJitLater
257 : PageAllocator::kNoAccess;
258 Address address = reinterpret_cast<Address>(AllocatePages(
259 page_allocator_, hint, RoundUp(size, page_size), alignment, permissions));
260 if (address != kNullAddress) {
261 DCHECK(IsAligned(address, alignment));
262 region_ = base::AddressRegion(address, size);
263 }
264}
265
266VirtualMemory::~VirtualMemory() {
267 if (IsReserved()) {
  (22) Taking true branch
268 Free();
  (23) Calling 'VirtualMemory::Free'
269 }
270}
271
272void VirtualMemory::Reset() {
273 page_allocator_ = nullptr;
  (18) Null pointer value stored to field 'page_allocator_'
274 region_ = base::AddressRegion();
275}
276
277bool VirtualMemory::SetPermissions(Address address, size_t size,
278 PageAllocator::Permission access) {
279 CHECK(InVM(address, size));
280 bool result =
281 v8::internal::SetPermissions(page_allocator_, address, size, access);
282 DCHECK(result);
283 return result;
284}
285
286size_t VirtualMemory::Release(Address free_start) {
287 DCHECK(IsReserved());
288 DCHECK(IsAligned(free_start, page_allocator_->CommitPageSize()));
289 // Notice: Order is important here. The VirtualMemory object might live
290 // inside the allocated region.
291
292 const size_t old_size = region_.size();
293 const size_t free_size = old_size - (free_start - region_.begin());
294 CHECK(InVM(free_start, free_size));
295 region_.set_size(old_size - free_size);
296 ReleasePages(page_allocator_, reinterpret_cast<void*>(region_.begin()),
297 old_size, region_.size());
298 return free_size;
299}
300
301void VirtualMemory::Free() {
302 DCHECK(IsReserved());
303 // Notice: Order is important here. The VirtualMemory object might live
304 // inside the allocated region.
305 v8::PageAllocator* page_allocator = page_allocator_;
  (24) 'page_allocator' initialized to a null pointer value
306 base::AddressRegion region = region_;
307 Reset();
308 // FreePages expects size to be aligned to allocation granularity however
309 // ReleasePages may leave size at only commit granularity. Align it here.
310 FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
311 RoundUp(region.size(), page_allocator->AllocatePageSize()));
  (25) Called C++ object pointer is null
312}
313
314void VirtualMemory::FreeReadOnly() {
315 DCHECK(IsReserved());
316 // The only difference to Free is that it doesn't call Reset which would write
317 // to the VirtualMemory object.
318 v8::PageAllocator* page_allocator = page_allocator_;
319 base::AddressRegion region = region_;
320
321 // FreePages expects size to be aligned to allocation granularity however
322 // ReleasePages may leave size at only commit granularity. Align it here.
323 FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
324 RoundUp(region.size(), page_allocator->AllocatePageSize()));
325}
326
327VirtualMemoryCage::VirtualMemoryCage() = default;
328
329VirtualMemoryCage::~VirtualMemoryCage() { Free(); }
330
331VirtualMemoryCage::VirtualMemoryCage(VirtualMemoryCage&& other) V8_NOEXCEPT {
332 *this = std::move(other);
333}
334
335VirtualMemoryCage& VirtualMemoryCage::operator=(VirtualMemoryCage&& other)
336 V8_NOEXCEPT {
337 page_allocator_ = std::move(other.page_allocator_);
338 reservation_ = std::move(other.reservation_);
339 return *this;
340}
341
342namespace {
343inline Address VirtualMemoryCageStart(
344 Address reservation_start,
345 const VirtualMemoryCage::ReservationParams& params) {
346 return RoundUp(reservation_start + params.base_bias_size,
347 params.base_alignment) -
348 params.base_bias_size;
349}
350} // namespace
351
352bool VirtualMemoryCage::InitReservation(
353 const ReservationParams& params, base::AddressRegion existing_reservation) {
354 DCHECK(!reservation_.IsReserved());
355
356 const size_t allocate_page_size = params.page_allocator->AllocatePageSize();
357 CHECK(IsAligned(params.reservation_size, allocate_page_size));
  (1) Taking false branch
  (2) Loop condition is false. Exiting loop
358 CHECK(params.base_alignment == ReservationParams::kAnyBaseAlignment ||
  (3) Assuming 'kAnyBaseAlignment' is equal to field 'base_alignment'
  (4) Taking false branch
  (5) Loop condition is false. Exiting loop
359 (IsAligned(params.base_alignment, allocate_page_size) &&
360 IsAligned(params.base_bias_size, allocate_page_size)));
361 CHECK_LE(params.base_bias_size, params.reservation_size);
  (6) Taking false branch
  (7) Loop condition is false. Exiting loop
  (8) Loop condition is false. Exiting loop
362
363 if (!existing_reservation.is_empty()) {
  (9) Assuming the condition is true
  (10) Taking true branch
364 CHECK_EQ(existing_reservation.size(), params.reservation_size);
  (11) Taking false branch
  (12) Loop condition is false. Exiting loop
  (13) Loop condition is false. Exiting loop
365 CHECK(params.base_alignment == ReservationParams::kAnyBaseAlignment ||
  (14) Taking false branch
  (15) Loop condition is false. Exiting loop
366 IsAligned(existing_reservation.begin(), params.base_alignment));
367 reservation_ =
  (16) Calling move assignment operator for 'VirtualMemory'
  (20) Returning from move assignment operator for 'VirtualMemory'
368 VirtualMemory(params.page_allocator, existing_reservation.begin(),
369 existing_reservation.size());
  (21) Calling '~VirtualMemory'
370 base_ = reservation_.address() + params.base_bias_size;
371 } else if (params.base_alignment == ReservationParams::kAnyBaseAlignment ||
372 params.base_bias_size == 0) {
373 // When the base doesn't need to be aligned or when the requested
374 // base_bias_size is zero, the virtual memory reservation fails only
375 // due to OOM.
376 Address hint =
377 RoundDown(params.requested_start_hint,
378 RoundUp(params.base_alignment, allocate_page_size));
379 VirtualMemory reservation(params.page_allocator, params.reservation_size,
380 reinterpret_cast<void*>(hint),
381 params.base_alignment);
382 if (!reservation.IsReserved()) return false;
383
384 reservation_ = std::move(reservation);
385 base_ = reservation_.address() + params.base_bias_size;
386 CHECK_EQ(reservation_.size(), params.reservation_size);
387 } else {
388 // Otherwise, we need to try harder by first overreserving
389 // in hopes of finding a correctly aligned address within the larger
390 // reservation.
391 Address hint =
392 RoundDown(params.requested_start_hint,
393 RoundUp(params.base_alignment, allocate_page_size)) -
394 RoundUp(params.base_bias_size, allocate_page_size);
395 const int kMaxAttempts = 4;
396 for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
397 // Reserve a region of twice the size so that there is an aligned address
398 // within it that's usable as the cage base.
399 VirtualMemory padded_reservation(params.page_allocator,
400 params.reservation_size * 2,
401 reinterpret_cast<void*>(hint));
402 if (!padded_reservation.IsReserved()) return false;
403
404 // Find properly aligned sub-region inside the reservation.
405 Address address =
406 VirtualMemoryCageStart(padded_reservation.address(), params);
407 CHECK(padded_reservation.InVM(address, params.reservation_size));
408
409#if defined(V8_OS_FUCHSIA)
410 // Fuchsia does not respect given hints so as a workaround we will use
411 // overreserved address space region instead of trying to re-reserve
412 // a subregion.
413 bool overreserve = true;
414#else
415 // For the last attempt use the overreserved region to avoid an OOM crash.
416 // This case can happen if there are many isolates being created in
417 // parallel that race for reserving the regions.
418 bool overreserve = (attempt == kMaxAttempts - 1);
419#endif
420
421 if (overreserve) {
422 if (padded_reservation.InVM(address, params.reservation_size)) {
423 reservation_ = std::move(padded_reservation);
424 base_ = address + params.base_bias_size;
425 break;
426 }
427 } else {
428 // Now free the padded reservation and immediately try to reserve an
429 // exact region at aligned address. We have to do this dancing because
430 // the reservation address requirement is more complex than just a
431 // certain alignment and not all operating systems support freeing parts
432 // of reserved address space regions.
433 padded_reservation.Free();
434
435 VirtualMemory reservation(params.page_allocator,
436 params.reservation_size,
437 reinterpret_cast<void*>(address));
438 if (!reservation.IsReserved()) return false;
439
440 // The reservation could still be somewhere else but we can accept it
441 // if it has the required alignment.
442 Address start_address =
443 VirtualMemoryCageStart(reservation.address(), params);
444 if (reservation.address() == start_address) {
445 reservation_ = std::move(reservation);
446 base_ = start_address + params.base_bias_size;
447 CHECK_EQ(reservation_.size(), params.reservation_size);
448 break;
449 }
450 }
451 }
452 }
453 CHECK_NE(base_, kNullAddress);
454 CHECK(IsAligned(base_, params.base_alignment));
455
456 const Address allocatable_base = RoundUp(base_, params.page_size);
457 const size_t allocatable_size =
458 RoundDown(params.reservation_size - (allocatable_base - base_) -
459 params.base_bias_size,
460 params.page_size);
461 size_ = allocatable_base + allocatable_size - base_;
462 page_allocator_ = std::make_unique<base::BoundedPageAllocator>(
463 params.page_allocator, allocatable_base, allocatable_size,
464 params.page_size,
465 base::PageInitializationMode::kAllocatedPagesCanBeUninitialized);
466 return true;
467}
468
469void VirtualMemoryCage::Free() {
470 if (IsReserved()) {
471 base_ = kNullAddress;
472 size_ = 0;
473 page_allocator_.reset();
474 reservation_.Free();
475 }
476}
477
478} // namespace internal
479} // namespace v8

../deps/v8/src/utils/allocation.h

1// Copyright 2012 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_UTILS_ALLOCATION_H_
6#define V8_UTILS_ALLOCATION_H_
7
8#include "include/v8-platform.h"
9#include "src/base/address-region.h"
10#include "src/base/compiler-specific.h"
11#include "src/base/platform/platform.h"
12#include "src/common/globals.h"
13#include "src/init/v8.h"
14
15namespace v8 {
16
17namespace base {
18class BoundedPageAllocator;
19} // namespace base
20
21namespace internal {
22
23class Isolate;
24
25// This file defines memory allocation functions. If a first attempt at an
26// allocation fails, these functions call back into the embedder, then attempt
27// the allocation a second time. The embedder callback must not reenter V8.
28
29// Called when allocation routines fail to allocate, even with a possible retry.
30// This function should not return, but should terminate the current processing.
31[[noreturn]] V8_EXPORT_PRIVATE void FatalProcessOutOfMemory(
32 Isolate* isolate, const char* message);
33
34// Superclass for classes managed with new & delete.
35class V8_EXPORT_PRIVATE Malloced {
36 public:
37 static void* operator new(size_t size);
38 static void operator delete(void* p);
39};
40
41template <typename T>
42T* NewArray(size_t size) {
43 T* result = new (std::nothrow) T[size];
44 if (result == nullptr) {
45 V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
46 result = new (std::nothrow) T[size];
47 if (result == nullptr) FatalProcessOutOfMemory(nullptr, "NewArray");
48 }
49 return result;
50}
51
52template <typename T, typename = typename std::enable_if<
53 base::is_trivially_copyable<T>::value>::type>
54T* NewArray(size_t size, T default_val) {
55 T* result = reinterpret_cast<T*>(NewArray<uint8_t>(sizeof(T) * size));
56 for (size_t i = 0; i < size; ++i) result[i] = default_val;
57 return result;
58}
59
60template <typename T>
61void DeleteArray(T* array) {
62 delete[] array;
63}
64
65template <typename T>
66struct ArrayDeleter {
67 void operator()(T* array) { DeleteArray(array); }
68};
69
70template <typename T>
71using ArrayUniquePtr = std::unique_ptr<T, ArrayDeleter<T>>;
72
73// The normal strdup functions use malloc. These versions of StrDup
74// and StrNDup uses new and calls the FatalProcessOutOfMemory handler
75// if allocation fails.
76V8_EXPORT_PRIVATE char* StrDup(const char* str);
77char* StrNDup(const char* str, int n);
78
79// Allocation policy for allocating in the C free store using malloc
80// and free. Used as the default policy for lists.
81class FreeStoreAllocationPolicy {
82 public:
83 template <typename T, typename TypeTag = T[]>
84 V8_INLINE T* NewArray(size_t length) {
85 return static_cast<T*>(Malloced::operator new(length * sizeof(T)));
86 }
87 template <typename T, typename TypeTag = T[]>
88 V8_INLINE void DeleteArray(T* p, size_t length) {
89 Malloced::operator delete(p);
90 }
91};
92
93using MallocFn = void* (*)(size_t);
94
95// Performs a malloc, with retry logic on failure. Returns nullptr on failure.
96// Call free to release memory allocated with this function.
97void* AllocWithRetry(size_t size, MallocFn = base::Malloc);
98
99V8_EXPORT_PRIVATE void* AlignedAlloc(size_t size, size_t alignment);
100V8_EXPORT_PRIVATE void AlignedFree(void* ptr);
101
102// Returns platform page allocator instance. Guaranteed to be a valid pointer.
103V8_EXPORT_PRIVATE v8::PageAllocator* GetPlatformPageAllocator();
104
105// Returns platform virtual memory space instance. Guaranteed to be a valid
106// pointer.
107V8_EXPORT_PRIVATE v8::VirtualAddressSpace* GetPlatformVirtualAddressSpace();
108
109#ifdef V8_SANDBOX
110// Returns the page allocator instance for allocating pages inside the sandbox.
111// Guaranteed to be a valid pointer.
112V8_EXPORT_PRIVATE v8::PageAllocator* GetSandboxPageAllocator();
113#endif
114
115// Returns the appropriate page allocator to use for ArrayBuffer backing
116// stores. If the sandbox is enabled, these must be allocated inside the
117// sandbox and so this will be the SandboxPageAllocator. Otherwise it will be
118// the PlatformPageAllocator.
119inline v8::PageAllocator* GetArrayBufferPageAllocator() {
120#ifdef V8_SANDBOX
121 return GetSandboxPageAllocator();
122#else
123 return GetPlatformPageAllocator();
124#endif
125}
126
127// Sets the given page allocator as the platform page allocator and returns
128// the current one. This function *must* be used only for testing purposes.
129// It is not thread-safe and the testing infrastructure should ensure that
130// the tests do not modify the value simultaneously.
131V8_EXPORT_PRIVATE v8::PageAllocator* SetPlatformPageAllocatorForTesting(
132 v8::PageAllocator* page_allocator);
133
134// Gets the page granularity for AllocatePages and FreePages. Addresses returned
135// by AllocatePages are aligned to this size.
136V8_EXPORT_PRIVATE size_t AllocatePageSize();
137
138// Gets the granularity at which the permissions and release calls can be made.
139V8_EXPORT_PRIVATE size_t CommitPageSize();
140
141// Sets the random seed so that GetRandomMmapAddr() will generate repeatable
142// sequences of random mmap addresses.
143V8_EXPORT_PRIVATE void SetRandomMmapSeed(int64_t seed);
144
145// Generate a random address to be used for hinting allocation calls.
146V8_EXPORT_PRIVATE void* GetRandomMmapAddr();
147
148// Allocates memory. Permissions are set according to the access argument.
149// |address| is a hint. |size| and |alignment| must be multiples of
150// AllocatePageSize(). Returns the address of the allocated memory, with the
151// specified size and alignment, or nullptr on failure.
152V8_EXPORT_PRIVATE
153V8_WARN_UNUSED_RESULT void* AllocatePages(v8::PageAllocator* page_allocator,
154 void* address, size_t size,
155 size_t alignment,
156 PageAllocator::Permission access);
157
158// Frees memory allocated by a call to AllocatePages. |address| and |size| must
159// be multiples of AllocatePageSize().
160V8_EXPORT_PRIVATE
161void FreePages(v8::PageAllocator* page_allocator, void* address,
162 const size_t size);
163
164// Releases memory that is no longer needed. The range specified by |address|
165// and |size| must be an allocated memory region. |size| and |new_size| must be
166// multiples of CommitPageSize(). Memory from |new_size| to |size| is released.
167// Released memory is left in an undefined state, so it should not be accessed.
168V8_EXPORT_PRIVATE
169void ReleasePages(v8::PageAllocator* page_allocator, void* address, size_t size,
170 size_t new_size);
171
172// Sets permissions according to |access|. |address| and |size| must be
173// multiples of CommitPageSize(). Setting permission to kNoAccess may
174// cause the memory contents to be lost. Returns true on success, otherwise
175// false.
176V8_EXPORT_PRIVATE
177V8_WARN_UNUSED_RESULT bool SetPermissions(v8::PageAllocator* page_allocator,
178 void* address, size_t size,
179 PageAllocator::Permission access);
180inline bool SetPermissions(v8::PageAllocator* page_allocator, Address address,
181 size_t size, PageAllocator::Permission access) {
182 return SetPermissions(page_allocator, reinterpret_cast<void*>(address), size,
183 access);
184}
185
186// Function that may release reserved memory regions to allow failed allocations
187// to succeed. |length| is the amount of memory needed. Returns |true| if memory
188// could be released, false otherwise.
189V8_EXPORT_PRIVATE bool OnCriticalMemoryPressure(size_t length);
190
191// Represents and controls an area of reserved memory.
192class VirtualMemory final {
193 public:
194 enum JitPermission { kNoJit, kMapAsJittable };
195
196 // Empty VirtualMemory object, controlling no reserved memory.
197 V8_EXPORT_PRIVATE VirtualMemory();
198
199 VirtualMemory(const VirtualMemory&) = delete;
200 VirtualMemory& operator=(const VirtualMemory&) = delete;
201
202 // Reserves virtual memory containing an area of the given size that is
203 // aligned per |alignment| rounded up to the |page_allocator|'s allocate page
204 // size. The |size| must be aligned with |page_allocator|'s commit page size.
205 // This may not be at the position returned by address().
206 V8_EXPORT_PRIVATE VirtualMemory(v8::PageAllocator* page_allocator,
207 size_t size, void* hint, size_t alignment = 1,
208 JitPermission jit = kNoJit);
209
210 // Construct a virtual memory by assigning it some already mapped address
211 // and size.
212 VirtualMemory(v8::PageAllocator* page_allocator, Address address, size_t size)
213 : page_allocator_(page_allocator), region_(address, size) {
214 DCHECK_NOT_NULL(page_allocator);
215 DCHECK(IsAligned(address, page_allocator->AllocatePageSize()));
216 DCHECK(IsAligned(size, page_allocator->CommitPageSize()));
217 }
218
219 // Releases the reserved memory, if any, controlled by this VirtualMemory
220 // object.
221 V8_EXPORT_PRIVATE ~VirtualMemory();
222
223 // Move constructor.
224 VirtualMemory(VirtualMemory&& other) V8_NOEXCEPT { *this = std::move(other); }
225
226 // Move assignment operator.
227 VirtualMemory& operator=(VirtualMemory&& other) V8_NOEXCEPT {
228 DCHECK(!IsReserved());
229 page_allocator_ = other.page_allocator_;
230 region_ = other.region_;
231 other.Reset();
  (17) Calling 'VirtualMemory::Reset'
  (19) Returning from 'VirtualMemory::Reset'
232 return *this;
233 }
234
235 // Returns whether the memory has been reserved.
236 bool IsReserved() const { return region_.begin() != kNullAddress; }
237
238 // Initialize or resets an embedded VirtualMemory object.
239 V8_EXPORT_PRIVATE void Reset();
240
241 v8::PageAllocator* page_allocator() { return page_allocator_; }
242
243 const base::AddressRegion& region() const { return region_; }
244
245 // Returns the start address of the reserved memory.
246 // If the memory was reserved with an alignment, this address is not
247 // necessarily aligned. The user might need to round it up to a multiple of
248 // the alignment to get the start of the aligned block.
249 Address address() const {
250 DCHECK(IsReserved());
251 return region_.begin();
252 }
253
254 Address end() const {
255 DCHECK(IsReserved());
256 return region_.end();
257 }
258
259 // Returns the size of the reserved memory. The returned value is only
260 // meaningful when IsReserved() returns true.
261 // If the memory was reserved with an alignment, this size may be larger
262 // than the requested size.
263 size_t size() const { return region_.size(); }
264
265 // Sets permissions according to the access argument. address and size must be
266 // multiples of CommitPageSize(). Returns true on success, otherwise false.
267 V8_EXPORT_PRIVATE bool SetPermissions(Address address, size_t size,
268 PageAllocator::Permission access);
269
270 // Releases memory after |free_start|. Returns the number of bytes released.
271 V8_EXPORT_PRIVATE size_t Release(Address free_start);
272
273 // Frees all memory.
274 V8_EXPORT_PRIVATE void Free();
275
276 // As with Free but does not write to the VirtualMemory object itself so it
277 // can be called on a VirtualMemory that is itself not writable.
278 V8_EXPORT_PRIVATE void FreeReadOnly();
279
280 bool InVM(Address address, size_t size) const {
281 return region_.contains(address, size);
282 }
283
284 private:
285 // Page allocator that controls the virtual memory.
286 v8::PageAllocator* page_allocator_ = nullptr;
287 base::AddressRegion region_;
288};
289
290// Represents a VirtualMemory reservation along with a BoundedPageAllocator that
291// can be used to allocate within the reservation.
292//
293// Virtual memory cages are used for both the pointer compression cage and code
294// ranges (on platforms that require code ranges) and are configurable via
295// ReservationParams.
296//
297// +------------+-----------+------------ ~~~ --+- ~~~ -+
298// |     ...    |    ...    |       ...         |  ...  |
299// +------------+-----------+------------ ~~~ --+- ~~~ -+
300// ^            ^           ^
301// start        cage base   allocatable base
302//
303// <------------>           <------------------->
304// base bias size           allocatable size
305//              <------------------------------->
306//                          cage size
307// <---------------------------------------------------->
308//                    reservation size
309//
310// - The reservation is made using ReservationParams::page_allocator.
311// - start is the start of the virtual memory reservation.
312// - cage base is the base address of the cage.
313// - allocatable base is the cage base rounded up to the nearest
314// ReservationParams::page_size, and is the start of the allocatable area for
315// the BoundedPageAllocator.
316// - cage size is the size of the area from cage base to the end of the
317// allocatable area.
318//
319// - The base bias is configured by ReservationParams::base_bias_size.
320// - The reservation size is configured by ReservationParams::reservation_size
321// but it might be actually bigger if we end up over-reserving the virtual
322// address space.
323//
324// Additionally,
325// - The alignment of the cage base is configured by
326// ReservationParams::base_alignment.
327// - The page size of the BoundedPageAllocator is configured by
328// ReservationParams::page_size.
329// - A hint for the value of start can be passed by
330// ReservationParams::requested_start_hint.
331//
332// The configuration is subject to the following alignment requirements.
333// Below, AllocatePageSize is short for
334// ReservationParams::page_allocator->AllocatePageSize().
335//
336// - The reservation size must be AllocatePageSize-aligned.
337// - If the base alignment is not kAnyBaseAlignment, both the base alignment
338// and the base bias size must be AllocatePageSize-aligned.
339// - The base alignment may be kAnyBaseAlignment to denote any alignment is
340// acceptable. In this case the base bias size does not need to be aligned.
341class VirtualMemoryCage {
342 public:
343 VirtualMemoryCage();
344 virtual ~VirtualMemoryCage();
345
346 VirtualMemoryCage(const VirtualMemoryCage&) = delete;
347 VirtualMemoryCage& operator=(VirtualMemoryCage&) = delete;
348
349 VirtualMemoryCage(VirtualMemoryCage&& other) V8_NOEXCEPT;
350 VirtualMemoryCage& operator=(VirtualMemoryCage&& other) V8_NOEXCEPT;
351
352 Address base() const { return base_; }
353 size_t size() const { return size_; }
354
355 base::BoundedPageAllocator* page_allocator() const {
356 return page_allocator_.get();
357 }
358
359 VirtualMemory* reservation() { return &reservation_; }
360 const VirtualMemory* reservation() const { return &reservation_; }
361
362 bool IsReserved() const {
363 DCHECK_EQ(base_ != kNullAddress, reservation_.IsReserved());
364 DCHECK_EQ(base_ != kNullAddress, size_ != 0);
365 return reservation_.IsReserved();
366 }
367
368 struct ReservationParams {
369 // The allocator to use to reserve the virtual memory.
370 v8::PageAllocator* page_allocator;
371 // See diagram above.
372 size_t reservation_size;
373 size_t base_alignment;
374 size_t base_bias_size;
375 size_t page_size;
376 Address requested_start_hint;
377
378 static constexpr size_t kAnyBaseAlignment = 1;
379 };
380
381 // A number of attempts is made to try to reserve a region that satisfies the
382 // constraints in params, but this may fail. The base address may be different
383 // than the one requested.
384 // If an existing reservation is provided, it will be used for this cage
385 // instead. The caller retains ownership of the reservation and is responsible
386 // for keeping the memory reserved during the lifetime of this object.
387 bool InitReservation(
388 const ReservationParams& params,
389 base::AddressRegion existing_reservation = base::AddressRegion());
390
391 void Free();
392
393 protected:
394 Address base_ = kNullAddress;
395 size_t size_ = 0;
396 std::unique_ptr<base::BoundedPageAllocator> page_allocator_;
397 VirtualMemory reservation_;
398};
399
400} // namespace internal
401} // namespace v8
402
403#endif // V8_UTILS_ALLOCATION_H_
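
To make the VirtualMemoryCage layout comment above concrete, here is a small self-contained sketch of the cage-base arithmetic; it mirrors VirtualMemoryCageStart() and the allocatable-area computation in InitReservation() (allocation.cc), but the RoundUp/RoundDown helpers and all parameter values below are illustrative assumptions, not V8's definitions.

#include <cstdint>
#include <cstdio>

using Address = std::uint64_t;  // stand-in for v8::internal::Address

// Power-of-two rounding helpers, standing in for V8's RoundUp/RoundDown.
constexpr Address RoundUp(Address value, Address alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}
constexpr Address RoundDown(Address value, Address alignment) {
  return value & ~(alignment - 1);
}

int main() {
  // Illustrative ReservationParams-style values (hypothetical).
  const Address reservation_start = 0x7f0000001000;  // start of the reservation
  const Address base_alignment    = 0x100000;        // cage base alignment
  const Address base_bias_size    = 0x3000;          // bias before the cage base
  const Address reservation_size  = 0x4000000;       // total reservation
  const Address page_size         = 0x10000;         // BoundedPageAllocator page size

  // VirtualMemoryCageStart(): align the biased start, then subtract the bias.
  const Address cage_base =
      RoundUp(reservation_start + base_bias_size, base_alignment) -
      base_bias_size;

  // InitReservation(): allocatable base is the cage base rounded up to the
  // cage page size; allocatable size is what remains, rounded down.
  const Address allocatable_base = RoundUp(cage_base, page_size);
  const Address allocatable_size =
      RoundDown(reservation_size - (allocatable_base - cage_base) -
                    base_bias_size,
                page_size);
  const Address cage_size = allocatable_base + allocatable_size - cage_base;

  std::printf("cage base:        %#llx\n",
              static_cast<unsigned long long>(cage_base));
  std::printf("allocatable base: %#llx\n",
              static_cast<unsigned long long>(allocatable_base));
  std::printf("allocatable size: %#llx\n",
              static_cast<unsigned long long>(allocatable_size));
  std::printf("cage size:        %#llx\n",
              static_cast<unsigned long long>(cage_size));
  return 0;
}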