Bug Summary

File: out/../deps/v8/src/heap/read-only-spaces.cc
Warning: line 680, column 5
Value stored to 'allocation_size' is never read
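
The deadcode.DeadStores checker (enabled via -analyzer-checker=deadcode in the invocation below) flags an assignment whose stored value is never read again before being overwritten or going out of scope. As a minimal, self-contained sketch of the pattern being reported (hypothetical names, not taken from the V8 sources):

    // Hypothetical illustration of a dead store; compiles and runs standalone.
    #include <cstdio>

    // Pretend allocator that fails for "large" requests.
    static int TryAllocate(int bytes) { return bytes > 64 ? -1 : bytes; }

    int AllocateAligned(int size_in_bytes) {
      int allocation_size = size_in_bytes;    // this store IS read (next line)
      int result = TryAllocate(allocation_size);
      if (result < 0) {
        allocation_size = size_in_bytes;      // dead store: never read again
        result = TryAllocate(size_in_bytes);  // uses size_in_bytes directly
      }
      return result;
    }

    int main() { std::printf("%d\n", AllocateAligned(128)); }

In the V8 function annotated below, the reassignment at line 680 plays the same role: allocation_size already equals size_in_bytes at that point, and the retry call passes size_in_bytes directly, so the stored value is never consumed.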

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-unknown-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name read-only-spaces.cc -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 2 -pic-is-pie -mframe-pointer=all -relaxed-aliasing -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/home/maurizio/node-v18.6.0/out -resource-dir /usr/local/lib/clang/16.0.0 -D _GLIBCXX_USE_CXX11_ABI=1 -D NODE_OPENSSL_CONF_NAME=nodejs_conf -D NODE_OPENSSL_HAS_QUIC -D V8_GYP_BUILD -D V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP=64 -D __STDC_FORMAT_MACROS -D OPENSSL_NO_PINSHARED -D OPENSSL_THREADS -D V8_TARGET_ARCH_X64 -D V8_HAVE_TARGET_OS -D V8_TARGET_OS_LINUX -D V8_EMBEDDER_STRING="-node.8" -D ENABLE_DISASSEMBLER -D V8_PROMISE_INTERNAL_FIELD_COUNT=1 -D V8_SHORT_BUILTIN_CALLS -D OBJECT_PRINT -D V8_INTL_SUPPORT -D V8_ATOMIC_OBJECT_FIELD_WRITES -D V8_ENABLE_LAZY_SOURCE_POSITIONS -D V8_USE_SIPHASH -D V8_SHARED_RO_HEAP -D V8_WIN64_UNWINDING_INFO -D V8_ENABLE_REGEXP_INTERPRETER_THREADED_DISPATCH -D V8_SNAPSHOT_COMPRESSION -D V8_ENABLE_WEBASSEMBLY -D V8_ENABLE_JAVASCRIPT_PROMISE_HOOKS -D V8_ALLOCATION_FOLDING -D V8_ALLOCATION_SITE_TRACKING -D V8_SCRIPTORMODULE_LEGACY_LIFETIME -D V8_ADVANCED_BIGINT_ALGORITHMS -D ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC -D UCONFIG_NO_SERVICE=1 -D U_ENABLE_DYLOAD=0 -D U_STATIC_IMPLEMENTATION=1 -D U_HAVE_STD_STRING=1 -D UCONFIG_NO_BREAK_ITERATION=0 -I ../deps/v8 -I ../deps/v8/include -I /home/maurizio/node-v18.6.0/out/Release/obj/gen/inspector-generated-output-root -I ../deps/v8/third_party/inspector_protocol -I /home/maurizio/node-v18.6.0/out/Release/obj/gen -I /home/maurizio/node-v18.6.0/out/Release/obj/gen/generate-bytecode-output-root -I ../deps/icu-small/source/i18n -I ../deps/icu-small/source/common -I ../deps/v8/third_party/zlib -I ../deps/v8/third_party/zlib/google -internal-isystem /usr/lib/gcc/x86_64-redhat-linux/8/../../../../include/c++/8 -internal-isystem /usr/lib/gcc/x86_64-redhat-linux/8/../../../../include/c++/8/x86_64-redhat-linux -internal-isystem /usr/lib/gcc/x86_64-redhat-linux/8/../../../../include/c++/8/backward -internal-isystem /usr/local/lib/clang/16.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-redhat-linux/8/../../../../x86_64-redhat-linux/include -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -Wno-unused-parameter -Wno-return-type -std=gnu++17 -fdeprecated-macro -fdebug-compilation-dir=/home/maurizio/node-v18.6.0/out -ferror-limit 19 -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-08-22-142216-507842-1 -x c++ ../deps/v8/src/heap/read-only-spaces.cc
1// Copyright 2020 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/heap/read-only-spaces.h"
6
7#include <memory>
8
9#include "include/v8-internal.h"
10#include "include/v8-platform.h"
11#include "src/base/logging.h"
12#include "src/common/globals.h"
13#include "src/common/ptr-compr-inl.h"
14#include "src/execution/isolate.h"
15#include "src/heap/allocation-stats.h"
16#include "src/heap/basic-memory-chunk.h"
17#include "src/heap/combined-heap.h"
18#include "src/heap/heap-inl.h"
19#include "src/heap/memory-allocator.h"
20#include "src/heap/memory-chunk.h"
21#include "src/heap/read-only-heap.h"
22#include "src/objects/objects-inl.h"
23#include "src/objects/property-details.h"
24#include "src/objects/string.h"
25#include "src/snapshot/read-only-deserializer.h"
26
27namespace v8 {
28namespace internal {
29
30void CopyAndRebaseRoots(Address* src, Address* dst, Address new_base) {
31 Address src_base = GetIsolateRootAddress(src[0]);
32 for (size_t i = 0; i < ReadOnlyHeap::kEntriesCount; ++i) {
33 dst[i] = src[i] - src_base + new_base;
34 }
35}
36
37void ReadOnlyArtifacts::set_read_only_heap(
38 std::unique_ptr<ReadOnlyHeap> read_only_heap) {
39 read_only_heap_ = std::move(read_only_heap);
40}
41
42void ReadOnlyArtifacts::InitializeChecksum(
43 SnapshotData* read_only_snapshot_data) {
44#ifdef DEBUG
45 read_only_blob_checksum_ = Checksum(read_only_snapshot_data->Payload());
46#endif // DEBUG
47}
48
49void ReadOnlyArtifacts::VerifyChecksum(SnapshotData* read_only_snapshot_data,
50 bool read_only_heap_created) {
51#ifdef DEBUG
52 if (read_only_blob_checksum_) {
53 // The read-only heap was set up from a snapshot. Make sure it's always
54 // the same snapshot.
55 uint32_t snapshot_checksum = Checksum(read_only_snapshot_data->Payload());
56 CHECK_WITH_MSG(snapshot_checksum,
57 "Attempt to create the read-only heap after already "
58 "creating from a snapshot.");
59 if (!FLAG_stress_snapshot) {
60 // --stress-snapshot is only intended to check how well the
61 // serializer/deserializer copes with unexpected objects, and is not
62 // intended to test whether the newly deserialized Isolate would actually
63 // work since it serializes a currently running Isolate, which is not
64 // supported. As a result, it's possible that it will create a new
65 // read-only snapshot that is not compatible with the original one (for
66 // instance due to the string table being re-ordered). Since we won't
67 // actually use that new Isolate, we're ok with any potential corruption.
68 // See crbug.com/1043058.
69 CHECK_EQ(read_only_blob_checksum_, snapshot_checksum);
70 }
71 } else {
72 // If there's no checksum, then that means the read-only heap objects are
73 // being created.
74 CHECK(read_only_heap_created);
75 }
76#endif // DEBUG
77}
78
79SingleCopyReadOnlyArtifacts::~SingleCopyReadOnlyArtifacts() {
80 // This particular SharedReadOnlySpace should not destroy its own pages as
81 // TearDown requires MemoryAllocator which itself is tied to an Isolate.
82 shared_read_only_space_->pages_.resize(0);
83
84 for (ReadOnlyPage* chunk : pages_) {
85 void* chunk_address = reinterpret_cast<void*>(chunk->address());
86 size_t size = RoundUp(chunk->size(), page_allocator_->AllocatePageSize());
87 CHECK(page_allocator_->FreePages(chunk_address, size));
88 }
89}
90
91ReadOnlyHeap* SingleCopyReadOnlyArtifacts::GetReadOnlyHeapForIsolate(
92 Isolate* isolate) {
93 return read_only_heap();
94}
95
96void SingleCopyReadOnlyArtifacts::Initialize(Isolate* isolate,
97 std::vector<ReadOnlyPage*>&& pages,
98 const AllocationStats& stats) {
99 // Do not use the platform page allocator when sharing a pointer compression
100 // cage, as the Isolate's page allocator is a BoundedPageAllocator tied to the
101 // shared cage.
102 page_allocator_ = COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL
103 ? isolate->page_allocator()
104 : GetPlatformPageAllocator();
105 pages_ = std::move(pages);
106 set_accounting_stats(stats);
107 set_shared_read_only_space(
108 std::make_unique<SharedReadOnlySpace>(isolate->heap(), this));
109}
110
111void SingleCopyReadOnlyArtifacts::ReinstallReadOnlySpace(Isolate* isolate) {
112 isolate->heap()->ReplaceReadOnlySpace(shared_read_only_space());
113}
114
115void SingleCopyReadOnlyArtifacts::VerifyHeapAndSpaceRelationships(
116 Isolate* isolate) {
117 DCHECK_EQ(read_only_heap()->read_only_space(), shared_read_only_space());
118
119 // Confirm the Isolate is using the shared ReadOnlyHeap and ReadOnlySpace.
120 DCHECK_EQ(read_only_heap(), isolate->read_only_heap());
121 DCHECK_EQ(shared_read_only_space(), isolate->heap()->read_only_space());
122}
123
124void PointerCompressedReadOnlyArtifacts::InitializeRootsFrom(Isolate* isolate) {
125 auto isolate_ro_roots =
126 isolate->roots_table().read_only_roots_begin().location();
127 CopyAndRebaseRoots(isolate_ro_roots, read_only_roots_, 0);
128}
129
130void PointerCompressedReadOnlyArtifacts::InitializeRootsIn(Isolate* isolate) {
131 auto isolate_ro_roots =
132 isolate->roots_table().read_only_roots_begin().location();
133 CopyAndRebaseRoots(read_only_roots_, isolate_ro_roots,
134 isolate->isolate_root());
135}
136
137SharedReadOnlySpace* PointerCompressedReadOnlyArtifacts::CreateReadOnlySpace(
138 Isolate* isolate) {
139 AllocationStats new_stats;
140 new_stats.IncreaseCapacity(accounting_stats().Capacity());
141
142 std::vector<std::unique_ptr<v8::PageAllocator::SharedMemoryMapping>> mappings;
143 std::vector<ReadOnlyPage*> pages;
144 Address isolate_root = isolate->isolate_root();
145 for (size_t i = 0; i < pages_.size(); ++i) {
146 const ReadOnlyPage* page = pages_[i];
147 const Tagged_t offset = OffsetForPage(i);
148 Address new_address = isolate_root + offset;
149 ReadOnlyPage* new_page = nullptr;
150 bool success = isolate->heap()
151 ->memory_allocator()
152 ->data_page_allocator()
153 ->ReserveForSharedMemoryMapping(
154 reinterpret_cast<void*>(new_address), page->size());
155 CHECK(success);
156 auto shared_memory = RemapPageTo(i, new_address, new_page);
157 // Later it's possible that this might fail, but for now on Linux this is
158 // not possible. When we move onto Windows, it's not possible to reserve
159 // memory and then map into the middle of it, at which point we will have to
160 // reserve the memory, free it and then attempt to remap to it, which could
161 // fail. At that point this will need to change.
162 CHECK(shared_memory);
163 CHECK_NOT_NULL(new_page);
164
165 new_stats.IncreaseAllocatedBytes(page->allocated_bytes(), new_page);
166 mappings.push_back(std::move(shared_memory));
167 pages.push_back(new_page);
168 }
169
170 auto* shared_read_only_space =
171 new SharedReadOnlySpace(isolate->heap(), std::move(pages),
172 std::move(mappings), std::move(new_stats));
173 return shared_read_only_space;
174}
175
176ReadOnlyHeap* PointerCompressedReadOnlyArtifacts::GetReadOnlyHeapForIsolate(
177 Isolate* isolate) {
178 DCHECK(ReadOnlyHeap::IsReadOnlySpaceShared());
179 InitializeRootsIn(isolate);
180
181 SharedReadOnlySpace* shared_read_only_space = CreateReadOnlySpace(isolate);
182 ReadOnlyHeap* read_only_heap = new ReadOnlyHeap(shared_read_only_space);
183
184 // TODO(v8:10699): The cache should just live uncompressed in
185 // ReadOnlyArtifacts and be decompressed on the fly.
186 auto original_cache = read_only_heap_->read_only_object_cache_;
187 auto& cache = read_only_heap->read_only_object_cache_;
188 Address isolate_root = isolate->isolate_root();
189 for (Object original_object : original_cache) {
190 Address original_address = original_object.ptr();
191 Address new_address = isolate_root + CompressTagged(original_address);
192 Object new_object = Object(new_address);
193 cache.push_back(new_object);
194 }
195
196 return read_only_heap;
197}
198
199std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping>
200PointerCompressedReadOnlyArtifacts::RemapPageTo(size_t i, Address new_address,
201 ReadOnlyPage*& new_page) {
202 std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping> mapping =
203 shared_memory_[i]->RemapTo(reinterpret_cast<void*>(new_address));
204 if (mapping) {
205 new_page = static_cast<ReadOnlyPage*>(reinterpret_cast<void*>(new_address));
206 return mapping;
207 } else {
208 return {};
209 }
210}
211
212void PointerCompressedReadOnlyArtifacts::Initialize(
213 Isolate* isolate, std::vector<ReadOnlyPage*>&& pages,
214 const AllocationStats& stats) {
215 DCHECK(ReadOnlyHeap::IsReadOnlySpaceShared());
216 DCHECK(pages_.empty());
217 DCHECK(!pages.empty());
218
219 // It's not possible to copy the AllocationStats directly as the new pages
220 // will be mapped to different addresses.
221 stats_.IncreaseCapacity(stats.Capacity());
222
223 v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
224 DCHECK(page_allocator->CanAllocateSharedPages());
225
226 for (const ReadOnlyPage* page : pages) {
227 size_t size = RoundUp(page->size(), page_allocator->AllocatePageSize());
228 // 1. Allocate some new memory for a shared copy of the page and copy the
229 // original contents into it. Doesn't need to be V8 page aligned, since
230 // we'll never use it directly.
231 auto shared_memory = page_allocator->AllocateSharedPages(size, page);
232 void* ptr = shared_memory->GetMemory();
233 CHECK_NOT_NULL(ptr);
234
235 // 2. Copy the contents of the original page into the shared page.
236 ReadOnlyPage* new_page = reinterpret_cast<ReadOnlyPage*>(ptr);
237
238 pages_.push_back(new_page);
239 shared_memory_.push_back(std::move(shared_memory));
240 // This is just CompressTagged but inlined so it will always compile.
241 Tagged_t compressed_address = CompressTagged(page->address());
242 page_offsets_.push_back(compressed_address);
243
244 // 3. Update the accounting stats so the allocated bytes are for the new
245 // shared page rather than the original.
246 stats_.IncreaseAllocatedBytes(page->allocated_bytes(), new_page);
247 }
248
249 InitializeRootsFrom(isolate);
250 set_shared_read_only_space(
251 std::make_unique<SharedReadOnlySpace>(isolate->heap(), this));
252}
253
254void PointerCompressedReadOnlyArtifacts::ReinstallReadOnlySpace(
255 Isolate* isolate) {
256 // We need to build a new SharedReadOnlySpace that occupies the same memory as
257 // the original one, so first the original space's pages must be freed.
258 Heap* heap = isolate->heap();
259 heap->read_only_space()->TearDown(heap->memory_allocator());
260
261 heap->ReplaceReadOnlySpace(CreateReadOnlySpace(heap->isolate()));
262
263 DCHECK_NE(heap->read_only_space(), shared_read_only_space());
264
265 // Also recreate the ReadOnlyHeap using this space.
266 auto* ro_heap = new ReadOnlyHeap(isolate->read_only_heap(),
267 isolate->heap()->read_only_space());
268 isolate->set_read_only_heap(ro_heap);
269
270 DCHECK_NE(*isolate->roots_table().read_only_roots_begin().location(), 0);
271}
272
273void PointerCompressedReadOnlyArtifacts::VerifyHeapAndSpaceRelationships(
274 Isolate* isolate) {
275 // Confirm the canonical versions of the ReadOnlySpace/ReadOnlyHeap from the
276 // ReadOnlyArtifacts are not accidentally present in a real Isolate (which
277 // might destroy them) and the ReadOnlyHeaps and Spaces are correctly
278 // associated with each other.
279 DCHECK_NE(shared_read_only_space(), isolate->heap()->read_only_space());
280 DCHECK_NE(read_only_heap(), isolate->read_only_heap());
281 DCHECK_EQ(read_only_heap()->read_only_space(), shared_read_only_space());
282 DCHECK_EQ(isolate->read_only_heap()->read_only_space(),
283 isolate->heap()->read_only_space());
284}
285
286// -----------------------------------------------------------------------------
287// ReadOnlySpace implementation
288
289ReadOnlySpace::ReadOnlySpace(Heap* heap)
290 : BaseSpace(heap, RO_SPACE),
291 top_(kNullAddress),
292 limit_(kNullAddress),
293 is_string_padding_cleared_(heap->isolate()->initialized_from_snapshot()),
294 capacity_(0),
295 area_size_(MemoryChunkLayout::AllocatableMemoryInMemoryChunk(RO_SPACE)) {}
296
297// Needs to be defined in the cc file to force the vtable to be emitted in
298// component builds.
299ReadOnlySpace::~ReadOnlySpace() = default;
300
301void SharedReadOnlySpace::TearDown(MemoryAllocator* memory_allocator) {
302 // SharedReadOnlySpaces do not tear down their own pages since they are either
303 // freed by the ReadOnlyArtifacts that contains them or, in the case of
304 // pointer compression, they are freed when the SharedMemoryMappings are
305 // freed.
306 pages_.resize(0);
307 accounting_stats_.Clear();
308}
309
310void ReadOnlySpace::TearDown(MemoryAllocator* memory_allocator) {
311 for (ReadOnlyPage* chunk : pages_) {
312 memory_allocator->FreeReadOnlyPage(chunk);
313 }
314 pages_.resize(0);
315 accounting_stats_.Clear();
316}
317
318void ReadOnlySpace::DetachPagesAndAddToArtifacts(
319 std::shared_ptr<ReadOnlyArtifacts> artifacts) {
320 DCHECK(ReadOnlyHeap::IsReadOnlySpaceShared());
321
322 Heap* heap = ReadOnlySpace::heap();
323 // Without pointer compression in a per-Isolate cage, ReadOnlySpace pages are
324 // directly shared between all heaps and so must be unregistered from their
325 // originating allocator.
326 Seal(COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL
327 ? SealMode::kDetachFromHeap
328 : SealMode::kDetachFromHeapAndUnregisterMemory);
329 artifacts->Initialize(heap->isolate(), std::move(pages_), accounting_stats_);
330}
331
332ReadOnlyPage::ReadOnlyPage(Heap* heap, BaseSpace* space, size_t chunk_size,
333 Address area_start, Address area_end,
334 VirtualMemory reservation)
335 : BasicMemoryChunk(heap, space, chunk_size, area_start, area_end,
336 std::move(reservation)) {
337 allocated_bytes_ = 0;
338 SetFlags(Flag::NEVER_EVACUATE | Flag::READ_ONLY_HEAP);
339 heap->incremental_marking()
340 ->non_atomic_marking_state()
341 ->bitmap(this)
342 ->MarkAllBits();
343}
344
345void ReadOnlyPage::MakeHeaderRelocatable() {
346 heap_ = nullptr;
347 owner_ = nullptr;
348 reservation_.Reset();
349}
350
351void ReadOnlySpace::SetPermissionsForPages(MemoryAllocator* memory_allocator,
352 PageAllocator::Permission access) {
353 for (BasicMemoryChunk* chunk : pages_) {
354 // Read-only pages don't have a valid reservation object, so we get the
355 // proper page allocator manually.
356 v8::PageAllocator* page_allocator =
357 memory_allocator->page_allocator(NOT_EXECUTABLE);
358 CHECK(SetPermissions(page_allocator, chunk->address(), chunk->size(),
359 access));
360 }
361}
362
363// After we have booted, we have created a map which represents free space
364// on the heap. If there was already a free list then the elements on it
365// were created with the wrong FreeSpaceMap (normally nullptr), so we need to
366// fix them.
367void ReadOnlySpace::RepairFreeSpacesAfterDeserialization() {
368 BasicMemoryChunk::UpdateHighWaterMark(top_);
369 // Each page may have a small free space that is not tracked by a free list.
370 // Those free spaces still contain null as their map pointer.
371 // Overwrite them with new fillers.
372 for (BasicMemoryChunk* chunk : pages_) {
373 Address start = chunk->HighWaterMark();
374 Address end = chunk->area_end();
375 // Put a filler object in the gap between the end of the allocated objects
376 // and the end of the allocatable area.
377 if (start < end) {
378 heap()->CreateFillerObjectAt(start, static_cast<int>(end - start),
379 ClearRecordedSlots::kNo);
380 }
381 }
382}
383
384void ReadOnlySpace::ClearStringPaddingIfNeeded() {
385 if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
386 // TODO(v8:11641): Revisit this once third-party heap supports iteration.
387 return;
388 }
389 if (is_string_padding_cleared_) return;
390
391 ReadOnlyHeapObjectIterator iterator(this);
392 for (HeapObject o = iterator.Next(); !o.is_null(); o = iterator.Next()) {
393 if (o.IsSeqOneByteString()) {
394 SeqOneByteString::cast(o).clear_padding();
395 } else if (o.IsSeqTwoByteString()) {
396 SeqTwoByteString::cast(o).clear_padding();
397 }
398 }
399 is_string_padding_cleared_ = true;
400}
401
402void ReadOnlySpace::Seal(SealMode ro_mode) {
403 DCHECK(!is_marked_read_only_);
404
405 FreeLinearAllocationArea();
406 is_marked_read_only_ = true;
407 auto* memory_allocator = heap()->memory_allocator();
408
409 if (ro_mode != SealMode::kDoNotDetachFromHeap) {
410 DetachFromHeap();
411 for (ReadOnlyPage* p : pages_) {
412 if (ro_mode == SealMode::kDetachFromHeapAndUnregisterMemory) {
413 memory_allocator->UnregisterReadOnlyPage(p);
414 }
415 if (ReadOnlyHeap::IsReadOnlySpaceShared()) {
416 p->MakeHeaderRelocatable();
417 }
418 }
419 }
420
421 SetPermissionsForPages(memory_allocator, PageAllocator::kRead);
422}
423
424void ReadOnlySpace::Unseal() {
425 DCHECK(is_marked_read_only_);
426 if (!pages_.empty()) {
427 SetPermissionsForPages(heap()->memory_allocator(),
428 PageAllocator::kReadWrite);
429 }
430 is_marked_read_only_ = false;
431}
432
433bool ReadOnlySpace::ContainsSlow(Address addr) const {
434 BasicMemoryChunk* c = BasicMemoryChunk::FromAddress(addr);
435 for (BasicMemoryChunk* chunk : pages_) {
436 if (chunk == c) return true;
437 }
438 return false;
439}
440
441namespace {
442// Only iterates over a single chunk as the chunk iteration is done externally.
443class ReadOnlySpaceObjectIterator : public ObjectIterator {
444 public:
445 ReadOnlySpaceObjectIterator(const Heap* heap, const ReadOnlySpace* space,
446 BasicMemoryChunk* chunk)
447 : cur_addr_(kNullAddress), cur_end_(kNullAddress), space_(space) {}
448
449 // Advance to the next object, skipping free spaces and other fillers and
450 // skipping the special garbage section of which there is one per space.
451 // Returns nullptr when the iteration has ended.
452 HeapObject Next() override {
453 HeapObject next_obj = FromCurrentPage();
454 if (!next_obj.is_null()) return next_obj;
455 return HeapObject();
456 }
457
458 private:
459 HeapObject FromCurrentPage() {
460 while (cur_addr_ != cur_end_) {
461 if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
462 cur_addr_ = space_->limit();
463 continue;
464 }
465 HeapObject obj = HeapObject::FromAddress(cur_addr_);
466 const int obj_size = obj.Size();
467 cur_addr_ += obj_size;
468 DCHECK_LE(cur_addr_, cur_end_);
469 if (!obj.IsFreeSpaceOrFiller()) {
470 if (obj.IsCode()) {
471 DCHECK(Code::cast(obj).is_builtin());
472 DCHECK_CODEOBJECT_SIZE(obj_size, space_);
473 } else {
474 DCHECK_OBJECT_SIZE(obj_size);
475 }
476 return obj;
477 }
478 }
479 return HeapObject();
480 }
481
482 Address cur_addr_; // Current iteration point.
483 Address cur_end_; // End iteration point.
484 const ReadOnlySpace* const space_;
485};
486} // namespace
487
488#ifdef VERIFY_HEAP
489namespace {
490class VerifyReadOnlyPointersVisitor : public VerifyPointersVisitor {
491 public:
492 explicit VerifyReadOnlyPointersVisitor(Heap* heap)
493 : VerifyPointersVisitor(heap) {}
494
495 protected:
496 void VerifyPointers(HeapObject host, MaybeObjectSlot start,
497 MaybeObjectSlot end) override {
498 if (!host.is_null()) {
499 CHECK(ReadOnlyHeap::Contains(host.map()));
500 }
501 VerifyPointersVisitor::VerifyPointers(host, start, end);
502
503 for (MaybeObjectSlot current = start; current < end; ++current) {
504 HeapObject heap_object;
505 if ((*current)->GetHeapObject(&heap_object)) {
506 CHECK(ReadOnlyHeap::Contains(heap_object));
507 }
508 }
509 }
510};
511} // namespace
512
513void ReadOnlySpace::Verify(Isolate* isolate) const {
514 bool allocation_pointer_found_in_space = top_ == limit_;
515 VerifyReadOnlyPointersVisitor visitor(isolate->heap());
516
517 for (BasicMemoryChunk* page : pages_) {
518 if (ReadOnlyHeap::IsReadOnlySpaceShared()) {
519 CHECK_NULL(page->owner());
520 } else {
521 CHECK_EQ(page->owner(), this);
522 }
523
524 if (page == Page::FromAllocationAreaAddress(top_)) {
525 allocation_pointer_found_in_space = true;
526 }
527 ReadOnlySpaceObjectIterator it(isolate->heap(), this, page);
528 Address end_of_previous_object = page->area_start();
529 Address top = page->area_end();
530
531 for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
532 CHECK(end_of_previous_object <= object.address());
533
534 Map map = object.map();
535 CHECK(map.IsMap());
536
537 // The object itself should look OK.
538 object.ObjectVerify(isolate);
539
540 // All the interior pointers should be contained in the heap.
541 int size = object.Size();
542 object.IterateBody(map, size, &visitor);
543 CHECK(object.address() + size <= top);
544 end_of_previous_object = object.address() + size;
545
546 CHECK(!object.IsExternalString());
547 CHECK(!object.IsJSArrayBuffer());
548 }
549
550 CHECK(!page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
551 CHECK(!page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
552 }
553 CHECK(allocation_pointer_found_in_space);
554
555#ifdef DEBUG
556 VerifyCounters(isolate->heap());
557#endif
558}
559
560#ifdef DEBUG
561void ReadOnlySpace::VerifyCounters(Heap* heap) const {
562 size_t total_capacity = 0;
563 size_t total_allocated = 0;
564 for (BasicMemoryChunk* page : pages_) {
565 total_capacity += page->area_size();
566 ReadOnlySpaceObjectIterator it(heap, this, page);
567 size_t real_allocated = 0;
568 for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
569 if (!object.IsFreeSpaceOrFiller()) {
570 real_allocated += object.Size();
571 }
572 }
573 total_allocated += page->allocated_bytes();
574 // The real size can be smaller than the accounted size if array trimming or
575 // object slack tracking happened after sweeping.
576 DCHECK_LE(real_allocated, accounting_stats_.AllocatedOnPage(page));
577 DCHECK_EQ(page->allocated_bytes(), accounting_stats_.AllocatedOnPage(page));
578 }
579 DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
580 DCHECK_EQ(total_allocated, accounting_stats_.Size());
581}
582#endif // DEBUG
583#endif // VERIFY_HEAP
584
585size_t ReadOnlySpace::CommittedPhysicalMemory() const {
586 if (!base::OS::HasLazyCommits()) return CommittedMemory();
587 BasicMemoryChunk::UpdateHighWaterMark(top_);
588 size_t size = 0;
589 for (auto* chunk : pages_) {
590 size += chunk->size();
591 }
592
593 return size;
594}
595
596void ReadOnlySpace::FreeLinearAllocationArea() {
597 // Mark the old linear allocation area with a free space map so it can be
598 // skipped when scanning the heap.
599 if (top_ == kNullAddress) {
600 DCHECK_EQ(kNullAddress, limit_);
601 return;
602 }
603
604 // Clear the bits in the unused black area.
605 ReadOnlyPage* page = pages_.back();
606 heap()->incremental_marking()->marking_state()->bitmap(page)->ClearRange(
607 page->AddressToMarkbitIndex(top_), page->AddressToMarkbitIndex(limit_));
608
609 heap()->CreateFillerObjectAt(top_, static_cast<int>(limit_ - top_),
610 ClearRecordedSlots::kNo);
611
612 BasicMemoryChunk::UpdateHighWaterMark(top_);
613
614 top_ = kNullAddress;
615 limit_ = kNullAddress;
616}
617
618void ReadOnlySpace::EnsureSpaceForAllocation(int size_in_bytes) {
619 if (top_ + size_in_bytes <= limit_) {
620 return;
621 }
622
623 DCHECK_GE(size_in_bytes, 0);
624
625 FreeLinearAllocationArea();
626
627 BasicMemoryChunk* chunk =
628 heap()->memory_allocator()->AllocateReadOnlyPage(this);
629 capacity_ += AreaSize();
630
631 accounting_stats_.IncreaseCapacity(chunk->area_size());
632 AccountCommitted(chunk->size());
633 CHECK_NOT_NULL(chunk);
634 pages_.push_back(static_cast<ReadOnlyPage*>(chunk));
635
636 heap()->CreateFillerObjectAt(chunk->area_start(),
637 static_cast<int>(chunk->area_size()),
638 ClearRecordedSlots::kNo);
639
640 top_ = chunk->area_start();
641 limit_ = chunk->area_end();
642 return;
643}
644
645HeapObject ReadOnlySpace::TryAllocateLinearlyAligned(
646 int size_in_bytes, AllocationAlignment alignment) {
647 Address current_top = top_;
648 int filler_size = Heap::GetFillToAlign(current_top, alignment);
649
650 Address new_top = current_top + filler_size + size_in_bytes;
651 if (new_top > limit_) return HeapObject();
652
653 // Allocation always occurs in the last chunk for RO_SPACE.
654 BasicMemoryChunk* chunk = pages_.back();
655 int allocated_size = filler_size + size_in_bytes;
656 accounting_stats_.IncreaseAllocatedBytes(allocated_size, chunk);
657 chunk->IncreaseAllocatedBytes(allocated_size);
658
659 top_ = new_top;
660 if (filler_size > 0) {
661 return heap()->PrecedeWithFiller(HeapObject::FromAddress(current_top),
662 filler_size);
663 }
664
665 return HeapObject::FromAddress(current_top);
666}
667
668AllocationResult ReadOnlySpace::AllocateRawAligned(
669 int size_in_bytes, AllocationAlignment alignment) {
670 DCHECK(!FLAG_enable_third_party_heap);
671 DCHECK(!IsDetached());
672 int allocation_size = size_in_bytes;
673
674 HeapObject object = TryAllocateLinearlyAligned(allocation_size, alignment);
675 if (object.is_null()) {
676 // We don't know exactly how much filler we need to align until space is
677 // allocated, so assume the worst case.
678 EnsureSpaceForAllocation(allocation_size +
679 Heap::GetMaximumFillToAlign(alignment));
680 allocation_size = size_in_bytes;
Value stored to 'allocation_size' is never read (a possible cleanup is sketched after the listing)
681 object = TryAllocateLinearlyAligned(size_in_bytes, alignment);
682 CHECK(!object.is_null());
683 }
684 MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
685
686 return AllocationResult::FromObject(object);
687}
688
689AllocationResult ReadOnlySpace::AllocateRawUnaligned(int size_in_bytes) {
690 DCHECK(!IsDetached());
691 EnsureSpaceForAllocation(size_in_bytes);
692 Address current_top = top_;
693 Address new_top = current_top + size_in_bytes;
694 DCHECK_LE(new_top, limit_);
695 top_ = new_top;
696 HeapObject object = HeapObject::FromAddress(current_top);
697
698 DCHECK(!object.is_null());
699 MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
700
701 // Allocation always occurs in the last chunk for RO_SPACE.
702 BasicMemoryChunk* chunk = pages_.back();
703 accounting_stats_.IncreaseAllocatedBytes(size_in_bytes, chunk);
704 chunk->IncreaseAllocatedBytes(size_in_bytes);
705
706 return AllocationResult::FromObject(object);
707}
708
709AllocationResult ReadOnlySpace::AllocateRaw(int size_in_bytes,
710 AllocationAlignment alignment) {
711 AllocationResult result =
712 USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned
713 ? AllocateRawAligned(size_in_bytes, alignment)
714 : AllocateRawUnaligned(size_in_bytes);
715 HeapObject heap_obj;
716 if (result.To(&heap_obj)) {
717 DCHECK(heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
718 }
719 return result;
720}
721
722size_t ReadOnlyPage::ShrinkToHighWaterMark() {
723 // Shrink pages to high water mark. The water mark points either to a filler
724 // or the area_end.
725 HeapObject filler = HeapObject::FromAddress(HighWaterMark());
726 if (filler.address() == area_end()) return 0;
727 CHECK(filler.IsFreeSpaceOrFiller());
728 DCHECK_EQ(filler.address() + filler.Size(), area_end());
729
730 size_t unused = RoundDown(static_cast<size_t>(area_end() - filler.address()),
731 MemoryAllocator::GetCommitPageSize());
732 if (unused > 0) {
733 DCHECK_EQ(0u, unused % MemoryAllocator::GetCommitPageSize());
734 if (FLAG_trace_gc_verbose) {
735 PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
736 reinterpret_cast<void*>(this),
737 reinterpret_cast<void*>(area_end()),
738 reinterpret_cast<void*>(area_end() - unused));
739 }
740 heap()->CreateFillerObjectAt(
741 filler.address(),
742 static_cast<int>(area_end() - filler.address() - unused),
743 ClearRecordedSlots::kNo);
744 heap()->memory_allocator()->PartialFreeMemory(
745 this, address() + size() - unused, unused, area_end() - unused);
746 if (filler.address() != area_end()) {
747 CHECK(filler.IsFreeSpaceOrFiller());
748 CHECK_EQ(filler.address() + filler.Size(), area_end());
749 }
750 }
751 return unused;
752}
753
754void ReadOnlySpace::ShrinkPages() {
755 if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) return;
756 BasicMemoryChunk::UpdateHighWaterMark(top_);
757 heap()->CreateFillerObjectAt(top_, static_cast<int>(limit_ - top_),
758 ClearRecordedSlots::kNo);
759
760 for (ReadOnlyPage* chunk : pages_) {
761 DCHECK(chunk->IsFlagSet(Page::NEVER_EVACUATE));
762 size_t unused = chunk->ShrinkToHighWaterMark();
763 capacity_ -= unused;
764 accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
765 AccountUncommitted(unused);
766 }
767 limit_ = pages_.back()->area_end();
768}
769
770SharedReadOnlySpace::SharedReadOnlySpace(
771 Heap* heap, PointerCompressedReadOnlyArtifacts* artifacts)
772 : SharedReadOnlySpace(heap) {
773 // This constructor should only be used when RO_SPACE is shared with pointer
774 // compression in a per-Isolate cage.
775 DCHECK(V8_SHARED_RO_HEAP_BOOL);
776 DCHECK(COMPRESS_POINTERS_BOOL);
777 DCHECK(COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL);
778 DCHECK(ReadOnlyHeap::IsReadOnlySpaceShared());
779 DCHECK(!artifacts->pages().empty());
780
781 accounting_stats_.IncreaseCapacity(artifacts->accounting_stats().Capacity());
782 for (ReadOnlyPage* page : artifacts->pages()) {
783 pages_.push_back(page);
784 accounting_stats_.IncreaseAllocatedBytes(page->allocated_bytes(), page);
785 }
786}
787
788SharedReadOnlySpace::SharedReadOnlySpace(
789 Heap* heap, std::vector<ReadOnlyPage*>&& new_pages,
790 std::vector<std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping>>&&
791 mappings,
792 AllocationStats&& new_stats)
793 : SharedReadOnlySpace(heap) {
794 DCHECK(V8_SHARED_RO_HEAP_BOOL);
795 DCHECK(COMPRESS_POINTERS_BOOL);
796 DCHECK(COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL);
797 DCHECK(ReadOnlyHeap::IsReadOnlySpaceShared());
798
799 accounting_stats_ = std::move(new_stats);
800 pages_ = std::move(new_pages);
801 shared_memory_mappings_ = std::move(mappings);
802}
803
804SharedReadOnlySpace::SharedReadOnlySpace(Heap* heap,
805 SingleCopyReadOnlyArtifacts* artifacts)
806 : SharedReadOnlySpace(heap) {
807 // This constructor should only be used when RO_SPACE is shared without
808 // pointer compression in a per-Isolate cage.
809 DCHECK(V8_SHARED_RO_HEAP_BOOL);
810 DCHECK(!COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL);
811 accounting_stats_ = artifacts->accounting_stats();
812 pages_ = artifacts->pages();
813}
814
815} // namespace internal
816} // namespace v8
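
Note on a possible fix: 'allocation_size' is initialized to 'size_in_bytes' at line 672 and is not modified before line 680, and the retry call already passes 'size_in_bytes' directly, so the flagged reassignment can simply be dropped. The sketch below shows how ReadOnlySpace::AllocateRawAligned could read after such a cleanup; it is only an illustration derived from the code listed above, not necessarily the change adopted upstream in V8.

    AllocationResult ReadOnlySpace::AllocateRawAligned(
        int size_in_bytes, AllocationAlignment alignment) {
      DCHECK(!FLAG_enable_third_party_heap);
      DCHECK(!IsDetached());
      int allocation_size = size_in_bytes;

      HeapObject object = TryAllocateLinearlyAligned(allocation_size, alignment);
      if (object.is_null()) {
        // We don't know exactly how much filler we need to align until space is
        // allocated, so assume the worst case.
        EnsureSpaceForAllocation(allocation_size +
                                 Heap::GetMaximumFillToAlign(alignment));
        // The dead store 'allocation_size = size_in_bytes;' is removed here.
        object = TryAllocateLinearlyAligned(size_in_bytes, alignment);
        CHECK(!object.is_null());
      }
      MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);

      return AllocationResult::FromObject(object);
    }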