Logo ROOT  
Reference Guide
 
Loading...
Searching...
No Matches
RPageStorageFile.cxx
Go to the documentation of this file.
1/// \file RPageStorageFile.cxx
2/// \ingroup NTuple
3/// \author Jakob Blomer <jblomer@cern.ch>
4/// \date 2019-11-25
5
6/*************************************************************************
7 * Copyright (C) 1995-2019, Rene Brun and Fons Rademakers. *
8 * All rights reserved. *
9 * *
10 * For the licensing terms see $ROOTSYS/LICENSE. *
11 * For the list of contributors see $ROOTSYS/README/CREDITS. *
12 *************************************************************************/
13
14#include <ROOT/RCluster.hxx>
15#include <ROOT/RLogger.hxx>
17#include <ROOT/RNTupleModel.hxx>
19#include <ROOT/RNTupleZip.hxx>
20#include <ROOT/RPage.hxx>
22#include <ROOT/RPagePool.hxx>
24#include <ROOT/RRawFile.hxx>
26#include <ROOT/RNTupleTypes.hxx>
27#include <ROOT/RNTupleUtils.hxx>
28
29#include <RVersion.h>
30#include <TDirectory.h>
31#include <TError.h>
33
34#include <algorithm>
35#include <cstdio>
36#include <cstdlib>
37#include <cstring>
38#include <iterator>
39#include <limits>
40#include <utility>
41
42#include <functional>
43#include <mutex>
44
46
53
60
67
74
// Constructs a sink on top of an already-created RNTupleFileWriter.
// Delegates to the (name, options) constructor, taking the ntuple name from the
// writer, and then assumes ownership of the writer. Note that the delegated-to
// constructor runs before the move below, so `writer` is still valid when
// GetNTupleName() is called.
75ROOT::Internal::RPageSinkFile::RPageSinkFile(std::unique_ptr<ROOT::Internal::RNTupleFileWriter> writer,
76 const ROOT::RNTupleWriteOptions &options)
77 : RPageSinkFile(writer->GetNTupleName(), options)
78{
   // Take ownership; from here on all writes go through fWriter.
79 fWriter = std::move(writer);
80}
81
83
85{
87 auto szZipHeader =
88 RNTupleCompressor::Zip(serializedHeader, length, GetWriteOptions().GetCompression(), zipBuffer.get());
89 fWriter->WriteNTupleHeader(zipBuffer.get(), szZipHeader, length);
90}
91
94{
96
97 auto fnAddStreamerInfo = [this](const ROOT::RFieldBase *field) {
98 const TClass *cl = nullptr;
99 if (auto classField = dynamic_cast<const RClassField *>(field)) {
100 cl = classField->GetClass();
101 } else if (auto streamerField = dynamic_cast<const RStreamerField *>(field)) {
102 cl = streamerField->GetClass();
103 }
104 if (!cl)
105 return;
106
107 auto streamerInfo = cl->GetStreamerInfo(field->GetTypeVersion());
108 if (!streamerInfo) {
109 throw RException(R__FAIL(std::string("cannot get streamerInfo for ") + cl->GetName() + " [" +
110 std::to_string(field->GetTypeVersion()) + "]"));
111 }
112 fInfosOfClassFields[streamerInfo->GetNumber()] = streamerInfo;
113 };
114
115 for (const auto field : changeset.fAddedFields) {
117 for (const auto &subField : *field) {
119 }
120 }
121}
122
// Writes one sealed (packed and compressed) page as a blob and returns its
// locator. `bytesPacked` is passed through to the writer so that TFile::ls()
// reports a reasonable compression ratio (see class documentation).
125{
126 std::uint64_t offsetData;
127 {
      // Account the blob write against the write timers.
128 RNTupleAtomicTimer timer(fCounters->fTimeWallWrite, fCounters->fTimeCpuWrite);
129 offsetData = fWriter->WriteBlob(sealedPage.GetBuffer(), sealedPage.GetBufferSize(), bytesPacked);
130 }
131
   // `result` is the RNTupleLocator handed back to the caller (declaration
   // elided in this view): position is the file offset of the blob, on-storage
   // size excludes the optional trailing checksum (GetDataSize vs GetBufferSize).
133 result.SetPosition(offsetData);
134 result.SetNBytesOnStorage(sealedPage.GetDataSize());
135 fCounters->fNPageCommitted.Inc();
136 fCounters->fSzWritePayload.Add(sealedPage.GetBufferSize());
   // Track cluster size for StageClusterImpl().
137 fNBytesCurrentCluster += sealedPage.GetBufferSize();
138 return result;
139}
140
143{
144 auto element = columnHandle.fColumn->GetElement();
146 {
147 RNTupleAtomicTimer timer(fCounters->fTimeWallZip, fCounters->fTimeCpuZip);
148 sealedPage = SealPage(page, *element);
149 }
150
151 fCounters->fSzZip.Add(page.GetNBytes());
152 return WriteSealedPage(sealedPage, element->GetPackedSize(page.GetNElements()));
153}
154
// Commits a single externally sealed page for the given physical column.
// The packed size is recomputed from the column's on-storage bit width,
// rounding the bit count up to whole bytes.
157{
158 const auto nBits = fDescriptorBuilder.GetDescriptor().GetColumnDescriptor(physicalColumnId).GetBitsOnStorage();
159 const auto bytesPacked = (nBits * sealedPage.GetNElements() + 7) / 8;
160 return WriteSealedPage(sealedPage, bytesPacked);
161}
162
// Subroutine of CommitSealedPageVImpl: performs one vector write of all pages
// collected in `batch` into a single reserved blob and appends one locator per
// page to `locators`. The batch is reset (but its vector capacity kept) on exit.
164{
165 RNTupleAtomicTimer timer(fCounters->fTimeWallWrite, fCounters->fTimeCpuWrite);
166
   // Reserve one contiguous blob large enough for the whole batch up front.
167 std::uint64_t offset = fWriter->ReserveBlob(batch.fSize, batch.fBytesPacked);
168
169 locators.reserve(locators.size() + batch.fSealedPages.size());
170
171 for (const auto *pagePtr : batch.fSealedPages) {
172 fWriter->WriteIntoReservedBlob(pagePtr->GetBuffer(), pagePtr->GetBufferSize(), offset);
      // `locator` declaration elided in this view; one locator per page,
      // pointing at the page's slice of the reserved blob.
174 locator.SetPosition(offset);
175 locator.SetNBytesOnStorage(pagePtr->GetDataSize());
176 locators.push_back(locator);
177 offset += pagePtr->GetBufferSize();
178 }
179
180 fCounters->fNPageCommitted.Add(batch.fSealedPages.size());
181 fCounters->fSzWritePayload.Add(batch.fSize);
182 fNBytesCurrentCluster += batch.fSize;
183
   // Reset the batch for reuse by the caller; clear() keeps the capacity.
184 batch.fSize = 0;
185 batch.fBytesPacked = 0;
186 batch.fSealedPages.clear();
187}
188
189std::vector<ROOT::RNTupleLocator>
190ROOT::Internal::RPageSinkFile::CommitSealedPageVImpl(std::span<RPageStorage::RSealedPageGroup> ranges,
191 const std::vector<bool> &mask)
192{
193 const std::uint64_t maxKeySize = fOptions->GetMaxKeySize();
194
196 std::vector<RNTupleLocator> locators;
197
198 std::size_t iPage = 0;
199 for (auto rangeIt = ranges.begin(); rangeIt != ranges.end(); ++rangeIt) {
200 auto &range = *rangeIt;
201 if (range.fFirst == range.fLast) {
202 // Skip empty ranges, they might not have a physical column ID!
203 continue;
204 }
205
206 const auto bitsOnStorage =
207 fDescriptorBuilder.GetDescriptor().GetColumnDescriptor(range.fPhysicalColumnId).GetBitsOnStorage();
208
209 for (auto sealedPageIt = range.fFirst; sealedPageIt != range.fLast; ++sealedPageIt, ++iPage) {
210 if (!mask[iPage])
211 continue;
212
213 const auto bytesPacked = (bitsOnStorage * sealedPageIt->GetNElements() + 7) / 8;
214
215 if (batch.fSize > 0 && batch.fSize + sealedPageIt->GetBufferSize() > maxKeySize) {
216 /**
217 * Adding this page would exceed maxKeySize. Since we always want to write into a single key
218 * with vectorized writes, we commit the current set of pages before proceeding.
219 * NOTE: we do this *before* checking if sealedPageIt->GetBufferSize() > maxKeySize to guarantee that
220 * we always flush the current batch before doing an individual WriteBlob. This way we
221 * preserve the assumption that a CommitBatch always contain a sequential set of pages.
222 */
223 CommitBatchOfPages(batch, locators);
224 }
225
226 if (sealedPageIt->GetBufferSize() > maxKeySize) {
227 // This page alone is bigger than maxKeySize: save it by itself, since it will need to be
228 // split into multiple keys.
229
230 // Since this check implies the previous check on batchSize + newSize > maxSize, we should
231 // already have committed the current batch before writing this page.
232 assert(batch.fSize == 0);
233
234 std::uint64_t offset =
235 fWriter->WriteBlob(sealedPageIt->GetBuffer(), sealedPageIt->GetBufferSize(), bytesPacked);
237 locator.SetPosition(offset);
238 locator.SetNBytesOnStorage(sealedPageIt->GetDataSize());
239 locators.push_back(locator);
240
241 fCounters->fNPageCommitted.Inc();
242 fCounters->fSzWritePayload.Add(sealedPageIt->GetBufferSize());
243 fNBytesCurrentCluster += sealedPageIt->GetBufferSize();
244
245 } else {
246 batch.fSealedPages.emplace_back(&(*sealedPageIt));
247 batch.fSize += sealedPageIt->GetBufferSize();
248 batch.fBytesPacked += bytesPacked;
249 }
250 }
251 }
252
253 if (batch.fSize > 0) {
254 CommitBatchOfPages(batch, locators);
255 }
256
257 return locators;
258}
259
// Returns the number of payload bytes written for the current cluster
// (excluding metadata) and resets the per-cluster byte counter.
261{
262 auto result = fNBytesCurrentCluster;
263 fNBytesCurrentCluster = 0;
264 return result;
265}
266
// Compresses the serialized page list of a cluster group and writes it as a
// blob; returns the locator of the written page-list envelope.
269{
   // `bufPageListZip` (declaration elided in this view) receives the
   // compressed page list; `length` is the uncompressed size.
271 auto szPageListZip =
272 RNTupleCompressor::Zip(serializedPageList, length, GetWriteOptions().GetCompression(), bufPageListZip.get())
;
273
275 result.SetNBytesOnStorage(szPageListZip);
276 result.SetPosition(fWriter->WriteBlob(bufPageListZip.get(), szPageListZip, length));
277 return result;
278}
279
282{
283 // Add the streamer info records from streamer fields: because of runtime polymorphism we may need to add additional
284 // types not covered by the type names of the class fields
285 for (const auto &extraTypeInfo : fDescriptorBuilder.GetDescriptor().GetExtraTypeInfoIterable()) {
287 continue;
288 // Ideally, we would avoid deserializing the streamer info records of the streamer fields that we just serialized.
289 // However, this happens only once at the end of writing and only when streamer fields are used, so the
290 // preference here is for code simplicity.
291 fInfosOfClassFields.merge(RNTupleSerializer::DeserializeStreamerInfos(extraTypeInfo.GetContent()).Unwrap());
292 }
293 fWriter->UpdateStreamerInfos(fInfosOfClassFields);
294
296 auto szFooterZip =
297 RNTupleCompressor::Zip(serializedFooter, length, GetWriteOptions().GetCompression(), bufFooterZip.get());
298 fWriter->WriteNTupleFooter(bufFooterZip.get(), szFooterZip, length);
299 return fWriter->Commit(GetWriteOptions().GetCompression());
300}
301
302std::unique_ptr<ROOT::Internal::RPageSink>
304{
305 auto writer = fWriter->CloneAsHidden(name);
306 auto cloned = std::unique_ptr<RPageSinkFile>(new RPageSinkFile(std::move(writer), opts));
307 return cloned;
308}
309
310////////////////////////////////////////////////////////////////////////////////
311
317
319 std::unique_ptr<ROOT::Internal::RRawFile> file,
320 const ROOT::RNTupleReadOptions &options)
321 : RPageSourceFile(ntupleName, options)
322{
323 fFile = std::move(file);
326}
327
// Convenience constructor: opens the file at `path` through RRawFile::Create()
// and delegates to the (name, raw file, options) constructor.
328ROOT::Internal::RPageSourceFile::RPageSourceFile(std::string_view ntupleName, std::string_view path,
329 const ROOT::RNTupleReadOptions &options)
330 : RPageSourceFile(ntupleName, ROOT::Internal::RRawFile::Create(path), options)
331{
332}
333
334std::unique_ptr<ROOT::Internal::RPageSourceFile>
336{
337 if (!anchor.fFile)
338 throw RException(R__FAIL("This RNTuple object was not streamed from a ROOT file (TFile or descendant)"));
339
340 std::unique_ptr<ROOT::Internal::RRawFile> rawFile;
341 // For local TFiles, TDavixFile, TCurlFile, and TNetXNGFile, we want to open a new RRawFile to take advantage of the
342 // faster reading. We check the exact class name to avoid classes inheriting in ROOT (for example TMemFile) or in
343 // experiment frameworks.
344 std::string className = anchor.fFile->IsA()->GetName();
345 auto url = anchor.fFile->GetEndpointUrl();
346 auto protocol = std::string(url->GetProtocol());
347 if (className == "TFile") {
349 } else if (className == "TDavixFile" || className == "TCurlFile" || className == "TNetXNGFile") {
351 } else {
353 }
354
355 auto pageSource = std::make_unique<RPageSourceFile>("", std::move(rawFile), options);
356 pageSource->fAnchor = anchor;
357 // NOTE: fNTupleName gets set only upon Attach().
358 return pageSource;
359}
360
// Stop the cluster pool's background thread before the members it uses are
// destroyed.
362{
363 fClusterPool.StopBackgroundThread();
364}
365
366std::unique_ptr<ROOT::Internal::RPageSource>
368 const ROOT::RNTupleReadOptions &options)
369{
370 assert(anchorLink.fLocator.GetType() == RNTupleLocator::kTypeFile);
371
372 const auto anchorPos = anchorLink.fLocator.GetPosition<std::uint64_t>();
373 auto anchor =
374 fReader.GetNTupleProperAtOffset(anchorPos, anchorLink.fLocator.GetNBytesOnStorage(), anchorLink.fLength).Unwrap();
375 auto pageSource = std::make_unique<RPageSourceFile>("", fFile->Clone(), options);
376 pageSource->fAnchor = anchor;
377 // NOTE: fNTupleName gets set only upon Attach().
378 return pageSource;
379}
380
382{
383 // If we constructed the page source with (ntuple name, path), we need to find the anchor first.
384 // Otherwise, the page source was created by OpenFromAnchor()
385 if (!fAnchor) {
386 fAnchor = fReader.GetNTuple(fNTupleName).Unwrap();
387 }
388 fReader.SetMaxKeySize(fAnchor->GetMaxKeySize());
389
390 fDescriptorBuilder.SetVersion(fAnchor->GetVersionEpoch(), fAnchor->GetVersionMajor(), fAnchor->GetVersionMinor(),
391 fAnchor->GetVersionPatch());
392 fDescriptorBuilder.SetOnDiskHeaderSize(fAnchor->GetNBytesHeader());
393 fDescriptorBuilder.AddToOnDiskFooterSize(fAnchor->GetNBytesFooter());
394
395 // Reserve enough space for the compressed and the uncompressed header/footer (see AttachImpl)
396 const auto bufSize = fAnchor->GetNBytesHeader() + fAnchor->GetNBytesFooter() +
397 std::max(fAnchor->GetLenHeader(), fAnchor->GetLenFooter());
398 fStructureBuffer.fBuffer = MakeUninitArray<unsigned char>(bufSize);
399 fStructureBuffer.fPtrHeader = fStructureBuffer.fBuffer.get();
400 fStructureBuffer.fPtrFooter = fStructureBuffer.fBuffer.get() + fAnchor->GetNBytesHeader();
401
402 auto readvLimits = fFile->GetReadVLimits();
403 // Never try to vectorize reads to a split key
404 readvLimits.fMaxSingleSize = std::min<size_t>(readvLimits.fMaxSingleSize, fAnchor->GetMaxKeySize());
405
406 if ((readvLimits.fMaxReqs < 2) ||
407 (std::max(fAnchor->GetNBytesHeader(), fAnchor->GetNBytesFooter()) > readvLimits.fMaxSingleSize) ||
408 (fAnchor->GetNBytesHeader() + fAnchor->GetNBytesFooter() > readvLimits.fMaxTotalSize)) {
409 RNTupleAtomicTimer timer(fCounters->fTimeWallRead, fCounters->fTimeCpuRead);
410 fReader.ReadBuffer(fStructureBuffer.fPtrHeader, fAnchor->GetNBytesHeader(), fAnchor->GetSeekHeader());
411 fReader.ReadBuffer(fStructureBuffer.fPtrFooter, fAnchor->GetNBytesFooter(), fAnchor->GetSeekFooter());
412 fCounters->fNRead.Add(2);
413 } else {
414 RNTupleAtomicTimer timer(fCounters->fTimeWallRead, fCounters->fTimeCpuRead);
415 R__ASSERT(fAnchor->GetNBytesHeader() < std::numeric_limits<std::size_t>::max());
416 R__ASSERT(fAnchor->GetNBytesFooter() < std::numeric_limits<std::size_t>::max());
417 ROOT::Internal::RRawFile::RIOVec readRequests[2] = {{fStructureBuffer.fPtrHeader, fAnchor->GetSeekHeader(),
418 static_cast<std::size_t>(fAnchor->GetNBytesHeader()), 0},
419 {fStructureBuffer.fPtrFooter, fAnchor->GetSeekFooter(),
420 static_cast<std::size_t>(fAnchor->GetNBytesFooter()), 0}};
421 fFile->ReadV(readRequests, 2);
422 fCounters->fNReadV.Inc();
423 }
424}
425
427{
428 auto unzipBuf = reinterpret_cast<unsigned char *>(fStructureBuffer.fPtrFooter) + fAnchor->GetNBytesFooter();
429
430 RNTupleDecompressor::Unzip(fStructureBuffer.fPtrHeader, fAnchor->GetNBytesHeader(), fAnchor->GetLenHeader(),
431 unzipBuf);
432 RNTupleSerializer::DeserializeHeader(unzipBuf, fAnchor->GetLenHeader(), fDescriptorBuilder);
433
434 RNTupleDecompressor::Unzip(fStructureBuffer.fPtrFooter, fAnchor->GetNBytesFooter(), fAnchor->GetLenFooter(),
435 unzipBuf);
436 RNTupleSerializer::DeserializeFooter(unzipBuf, fAnchor->GetLenFooter(), fDescriptorBuilder);
437
438 auto desc = fDescriptorBuilder.MoveDescriptor();
439
440 // fNTupleName is empty if and only if we created this source via CreateFromAnchor. If that's the case, this is the
441 // earliest we can set the name.
442 if (fNTupleName.empty())
443 fNTupleName = desc.GetName();
444
445 std::vector<unsigned char> buffer;
446 for (const auto &cgDesc : desc.GetClusterGroupIterable()) {
447 buffer.resize(std::max<size_t>(buffer.size(),
448 cgDesc.GetPageListLength() + cgDesc.GetPageListLocator().GetNBytesOnStorage()));
449 auto *zipBuffer = buffer.data() + cgDesc.GetPageListLength();
450 fReader.ReadBuffer(zipBuffer, cgDesc.GetPageListLocator().GetNBytesOnStorage(),
451 cgDesc.GetPageListLocator().GetPosition<std::uint64_t>());
452 RNTupleDecompressor::Unzip(zipBuffer, cgDesc.GetPageListLocator().GetNBytesOnStorage(),
453 cgDesc.GetPageListLength(), buffer.data());
454
455 RNTupleSerializer::DeserializePageList(buffer.data(), cgDesc.GetPageListLength(), cgDesc.GetId(), desc, mode);
456 }
457
458 // For the page reads, we rely on the I/O scheduler to define the read requests
459 fFile->SetBuffering(false);
460
461 return desc;
462}
463
466{
467 const auto clusterId = localIndex.GetClusterId();
468
470 {
471 auto descriptorGuard = GetSharedDescriptorGuard();
472 const auto &clusterDescriptor = descriptorGuard->GetClusterDescriptor(clusterId);
473 pageInfo = clusterDescriptor.GetPageRange(physicalColumnId).Find(localIndex.GetIndexInCluster());
474 }
475
476 sealedPage.SetBufferSize(pageInfo.GetLocator().GetNBytesOnStorage() + pageInfo.HasChecksum() * kNBytesPageChecksum);
477 sealedPage.SetNElements(pageInfo.GetNElements());
478 sealedPage.SetHasChecksum(pageInfo.HasChecksum());
479 if (!sealedPage.GetBuffer())
480 return;
481 if (pageInfo.GetLocator().GetType() != RNTupleLocator::kTypePageZero) {
482 fReader.ReadBuffer(const_cast<void *>(sealedPage.GetBuffer()), sealedPage.GetBufferSize(),
483 pageInfo.GetLocator().GetPosition<std::uint64_t>());
484 } else {
485 assert(!pageInfo.HasChecksum());
486 memcpy(const_cast<void *>(sealedPage.GetBuffer()), ROOT::Internal::RPage::GetPageZeroBuffer(),
487 sealedPage.GetBufferSize());
488 }
489
490 sealedPage.VerifyChecksumIfEnabled().ThrowOnError();
491}
492
496{
497 const auto columnId = columnHandle.fPhysicalId;
498 const auto clusterId = clusterInfo.fClusterId;
499 const auto pageInfo = clusterInfo.fPageInfo;
500
501 const auto element = columnHandle.fColumn->GetElement();
502 const auto elementSize = element->GetSize();
503 const auto elementInMemoryType = element->GetIdentifier().fInMemoryType;
504
505 if (pageInfo.GetLocator().GetType() == RNTupleLocator::kTypePageZero) {
506 auto pageZero = fPageAllocator->NewPage(elementSize, pageInfo.GetNElements());
507 pageZero.GrowUnchecked(pageInfo.GetNElements());
508 memset(pageZero.GetBuffer(), 0, pageZero.GetNBytes());
509 pageZero.SetWindow(clusterInfo.fColumnOffset + pageInfo.GetFirstElementIndex(),
511 return fPagePool.RegisterPage(std::move(pageZero), RPagePool::RKey{columnId, elementInMemoryType});
512 }
513
515 sealedPage.SetNElements(pageInfo.GetNElements());
516 sealedPage.SetHasChecksum(pageInfo.HasChecksum());
517 sealedPage.SetBufferSize(pageInfo.GetLocator().GetNBytesOnStorage() + pageInfo.HasChecksum() * kNBytesPageChecksum);
518 std::unique_ptr<unsigned char[]> directReadBuffer; // only used if cluster pool is turned off
519
520 if (fOptions.GetClusterCache() == ROOT::RNTupleReadOptions::EClusterCache::kOff) {
522 {
523 RNTupleAtomicTimer timer(fCounters->fTimeWallRead, fCounters->fTimeCpuRead);
524 fReader.ReadBuffer(directReadBuffer.get(), sealedPage.GetBufferSize(),
525 pageInfo.GetLocator().GetPosition<std::uint64_t>());
526 }
527 fCounters->fNPageRead.Inc();
528 fCounters->fNRead.Inc();
529 fCounters->fSzReadPayload.Add(sealedPage.GetBufferSize());
530 sealedPage.SetBuffer(directReadBuffer.get());
531 } else {
532 if (!fCurrentCluster || (fCurrentCluster->GetId() != clusterId) || !fCurrentCluster->ContainsColumn(columnId))
533 fCurrentCluster = fClusterPool.GetCluster(clusterId, fActivePhysicalColumns.ToColumnSet());
534 R__ASSERT(fCurrentCluster->ContainsColumn(columnId));
535
536 auto cachedPageRef =
538 if (!cachedPageRef.Get().IsNull())
539 return cachedPageRef;
540
541 ROnDiskPage::Key key(columnId, pageInfo.GetPageNumber());
542 auto onDiskPage = fCurrentCluster->GetOnDiskPage(key);
543 R__ASSERT(onDiskPage && (sealedPage.GetBufferSize() == onDiskPage->GetSize()));
544 sealedPage.SetBuffer(onDiskPage->GetAddress());
545 }
546
548 {
549 RNTupleAtomicTimer timer(fCounters->fTimeWallUnzip, fCounters->fTimeCpuUnzip);
550 newPage = UnsealPage(sealedPage, *element).Unwrap();
551 fCounters->fSzUnzip.Add(elementSize * pageInfo.GetNElements());
552 }
553
554 newPage.SetWindow(clusterInfo.fColumnOffset + pageInfo.GetFirstElementIndex(),
556 fCounters->fNPageUnsealed.Inc();
557 return fPagePool.RegisterPage(std::move(newPage), RPagePool::RKey{columnId, elementInMemoryType});
558}
559
560std::unique_ptr<ROOT::Internal::RPageSource> ROOT::Internal::RPageSourceFile::CloneImpl() const
561{
562 auto clone = new RPageSourceFile(fNTupleName, fOptions);
563 clone->fFile = fFile->Clone();
564 clone->fReader = ROOT::Internal::RMiniFileReader(clone->fFile.get());
565 return std::unique_ptr<RPageSourceFile>(clone);
566}
567
568std::unique_ptr<ROOT::Internal::RCluster>
570 std::vector<ROOT::Internal::RRawFile::RIOVec> &readRequests)
571{
572 struct ROnDiskPageLocator {
573 ROOT::DescriptorId_t fColumnId = 0;
574 ROOT::NTupleSize_t fPageNo = 0;
575 std::uint64_t fOffset = 0;
576 std::uint64_t fSize = 0;
577 std::size_t fBufPos = 0;
578 };
579
580 std::vector<ROnDiskPageLocator> onDiskPages;
581 auto activeSize = 0;
582 auto pageZeroMap = std::make_unique<ROnDiskPageMap>();
583 PrepareLoadCluster(
587 const auto &pageLocator = pageInfo.GetLocator();
589 throw RException(R__FAIL("tried to read a page with an unknown locator"));
590 const auto nBytes = pageLocator.GetNBytesOnStorage() + pageInfo.HasChecksum() * kNBytesPageChecksum;
592 onDiskPages.push_back({physicalColumnId, pageNo, pageLocator.GetPosition<std::uint64_t>(), nBytes, 0});
593 });
594
595 // Linearize the page requests by file offset
596 std::sort(onDiskPages.begin(), onDiskPages.end(),
597 [](const ROnDiskPageLocator &a, const ROnDiskPageLocator &b) { return a.fOffset < b.fOffset; });
598
599 // In order to coalesce close-by pages, we collect the sizes of the gaps between pages on disk. We then order
600 // the gaps by size, sum them up and find a cutoff for the largest gap that we tolerate when coalescing pages.
601 // The size of the cutoff is given by the fraction of extra bytes we are willing to read in order to reduce
602 // the number of read requests. We thus schedule the lowest number of requests given a tolerable fraction
603 // of extra bytes.
604 // TODO(jblomer): Eventually we may want to select the parameter at runtime according to link latency and speed,
605 // memory consumption, device block size.
606 float maxOverhead = 0.25 * float(activeSize);
607 std::vector<std::size_t> gaps;
608 if (onDiskPages.size())
609 gaps.reserve(onDiskPages.size() - 1);
610 for (unsigned i = 1; i < onDiskPages.size(); ++i) {
611 std::int64_t gap =
612 static_cast<int64_t>(onDiskPages[i].fOffset) - (onDiskPages[i - 1].fSize + onDiskPages[i - 1].fOffset);
613 gaps.emplace_back(std::max(gap, std::int64_t(0)));
614 // If the pages overlap, substract the overlapped bytes from `activeSize`
615 activeSize += std::min(gap, std::int64_t(0));
616 }
617 std::sort(gaps.begin(), gaps.end());
618 std::size_t gapCut = 0;
619 std::size_t currentGap = 0;
620 float szExtra = 0.0;
621 for (auto g : gaps) {
622 if (g != currentGap) {
624 currentGap = g;
625 }
626 szExtra += g;
627 if (szExtra > maxOverhead)
628 break;
629 }
630
631 // In a first step, we coalesce the read requests and calculate the cluster buffer size.
632 // In a second step, we'll fix-up the memory destinations for the read calls given the
633 // address of the allocated buffer. We must not touch, however, the read requests from previous
634 // calls to PrepareSingleCluster()
635 const auto currentReadRequestIdx = readRequests.size();
636
638 // To simplify the first loop iteration, pretend an empty request starting at the first page's fOffset.
639 if (!onDiskPages.empty())
640 req.fOffset = onDiskPages[0].fOffset;
641 std::size_t szPayload = 0;
642 std::size_t szOverhead = 0;
643 const std::uint64_t maxKeySize = fReader.GetMaxKeySize();
644 for (auto &s : onDiskPages) {
645 R__ASSERT(s.fSize > 0);
646 const std::int64_t readUpTo = req.fOffset + req.fSize;
647 // Note: byte ranges of pages may overlap
648 const std::uint64_t overhead = std::max(static_cast<std::int64_t>(s.fOffset) - readUpTo, std::int64_t(0));
649 const std::uint64_t extent = std::max(static_cast<std::int64_t>(s.fOffset + s.fSize) - readUpTo, std::int64_t(0));
650 if (req.fSize + extent < maxKeySize && overhead <= gapCut) {
653 s.fBufPos = reinterpret_cast<intptr_t>(req.fBuffer) + s.fOffset - req.fOffset;
654 req.fSize += extent;
655 continue;
656 }
657
658 // close the current request and open new one
659 if (req.fSize > 0)
660 readRequests.emplace_back(req);
661
662 req.fBuffer = reinterpret_cast<unsigned char *>(req.fBuffer) + req.fSize;
663 s.fBufPos = reinterpret_cast<intptr_t>(req.fBuffer);
664
665 szPayload += s.fSize;
666 req.fOffset = s.fOffset;
667 req.fSize = s.fSize;
668 }
669 readRequests.emplace_back(req);
670 fCounters->fSzReadPayload.Add(szPayload);
671 fCounters->fSzReadOverhead.Add(szOverhead);
672
673 // Register the on disk pages in a page map
674 auto buffer = new unsigned char[reinterpret_cast<intptr_t>(req.fBuffer) + req.fSize];
675 auto pageMap = std::make_unique<ROOT::Internal::ROnDiskPageMapHeap>(std::unique_ptr<unsigned char[]>(buffer));
676 for (const auto &s : onDiskPages) {
677 ROnDiskPage::Key key(s.fColumnId, s.fPageNo);
678 pageMap->Register(key, ROnDiskPage(buffer + s.fBufPos, s.fSize));
679 }
680 fCounters->fNPageRead.Add(onDiskPages.size());
681 for (auto i = currentReadRequestIdx; i < readRequests.size(); ++i) {
682 readRequests[i].fBuffer = buffer + reinterpret_cast<intptr_t>(readRequests[i].fBuffer);
683 }
684
685 auto cluster = std::make_unique<RCluster>(clusterKey.fClusterId);
686 cluster->Adopt(std::move(pageMap));
687 cluster->Adopt(std::move(pageZeroMap));
688 for (auto colId : clusterKey.fPhysicalColumnSet)
689 cluster->SetColumnAvailable(colId);
690 return cluster;
691}
692
693std::vector<std::unique_ptr<ROOT::Internal::RCluster>>
695{
696 fCounters->fNClusterLoaded.Add(clusterKeys.size());
697
698 std::vector<std::unique_ptr<ROOT::Internal::RCluster>> clusters;
699 std::vector<ROOT::Internal::RRawFile::RIOVec> readRequests;
700
701 clusters.reserve(clusterKeys.size());
702 for (auto key : clusterKeys) {
703 clusters.emplace_back(PrepareSingleCluster(key, readRequests));
704 }
705
706 auto nReqs = readRequests.size();
707 auto readvLimits = fFile->GetReadVLimits();
708 // We never want to do vectorized reads of split blobs, so we limit our single size to maxKeySize.
709 readvLimits.fMaxSingleSize = std::min<size_t>(readvLimits.fMaxSingleSize, fReader.GetMaxKeySize());
710
711 int iReq = 0;
712 while (nReqs > 0) {
713 auto nBatch = std::min(nReqs, readvLimits.fMaxReqs);
714
715 if (readvLimits.HasSizeLimit()) {
716 std::uint64_t totalSize = 0;
717 for (std::size_t i = 0; i < nBatch; ++i) {
718 if (readRequests[iReq + i].fSize > readvLimits.fMaxSingleSize) {
719 nBatch = i;
720 break;
721 }
722
723 totalSize += readRequests[iReq + i].fSize;
724 if (totalSize > readvLimits.fMaxTotalSize) {
725 nBatch = i;
726 break;
727 }
728 }
729 }
730
731 if (nBatch <= 1) {
732 nBatch = 1;
733 RNTupleAtomicTimer timer(fCounters->fTimeWallRead, fCounters->fTimeCpuRead);
734 fReader.ReadBuffer(readRequests[iReq].fBuffer, readRequests[iReq].fSize, readRequests[iReq].fOffset);
735 } else {
736 RNTupleAtomicTimer timer(fCounters->fTimeWallRead, fCounters->fTimeCpuRead);
737 fFile->ReadV(&readRequests[iReq], nBatch);
738 }
739 fCounters->fNReadV.Inc();
740 fCounters->fNRead.Add(nBatch);
741
742 iReq += nBatch;
743 nReqs -= nBatch;
744 }
745
746 return clusters;
747}
748
750{
751 fReader.LoadStreamerInfo();
752}
fBuffer
dim_t fSize
#define R__FAIL(msg)
Short-hand to return an RResult<T> in an error state; the RError is implicitly converted into RResult...
Definition RError.hxx:300
#define b(i)
Definition RSha256.hxx:100
#define g(i)
Definition RSha256.hxx:105
#define a(i)
Definition RSha256.hxx:99
ROOT::Detail::TRangeCast< T, true > TRangeDynCast
TRangeDynCast is an adapter class that allows the typed iteration through a TCollection.
#define R__ASSERT(e)
Checks condition e and reports a fatal error if it's false.
Definition TError.h:125
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t Float_t Float_t Int_t Int_t UInt_t UInt_t Rectangle_t mask
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t Float_t Float_t Int_t Int_t UInt_t UInt_t Rectangle_t Int_t Int_t Window_t TString Int_t GCValues_t GetPrimarySelectionOwner GetDisplay GetScreen GetColormap GetNativeEvent const char const char dpyName wid window const char font_name cursor keysym reg const char only_if_exist regb h Point_t winding char text const char depth char const char Int_t count const char ColorStruct_t color const char Pixmap_t Pixmap_t PictureAttributes_t attr const char char ret_data h unsigned char height h offset
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t Float_t Float_t Int_t Int_t UInt_t UInt_t Rectangle_t result
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t Float_t Float_t Int_t Int_t UInt_t UInt_t Rectangle_t Int_t Int_t Window_t TString Int_t GCValues_t GetPrimarySelectionOwner GetDisplay GetScreen GetColormap GetNativeEvent const char const char dpyName wid window const char font_name cursor keysym reg const char only_if_exist regb h Point_t winding char text const char depth char const char Int_t count const char ColorStruct_t color const char Pixmap_t Pixmap_t PictureAttributes_t attr const char char ret_data h unsigned char height h length
Option_t Option_t TPoint TPoint const char mode
char name[80]
Definition TGX11.cxx:145
An interface to read from, or write to, a ROOT file, as well as performing other common operations.
Definition RFile.hxx:253
Read RNTuple data blocks from a TFile container, provided by a RRawFile.
Definition RMiniFile.hxx:61
static std::size_t Zip(const void *from, std::size_t nbytes, int compression, void *to)
Returns the size of the compressed data, written into the provided output buffer.
static void Unzip(const void *from, size_t nbytes, size_t dataLen, void *to)
The nbytes parameter provides the size of the from buffer.
static std::unique_ptr< RNTupleFileWriter > Append(std::string_view ntupleName, TDirectory &fileOrDirectory, std::uint64_t maxKeySize, bool isHidden)
The directory parameter can also be a TFile object (TFile inherits from TDirectory).
static std::unique_ptr< RNTupleFileWriter > Recreate(std::string_view ntupleName, std::string_view path, EContainerFormat containerFormat, const ROOT::RNTupleWriteOptions &options)
Create or truncate the local file given by path with the new empty RNTuple identified by ntupleName.
static RResult< void > DeserializePageList(const void *buffer, std::uint64_t bufSize, ROOT::DescriptorId_t clusterGroupId, RNTupleDescriptor &desc, EDescriptorDeserializeMode mode)
static RResult< void > DeserializeFooter(const void *buffer, std::uint64_t bufSize, ROOT::Internal::RNTupleDescriptorBuilder &descBuilder)
static RResult< StreamerInfoMap_t > DeserializeStreamerInfos(const std::string &extraTypeInfoContent)
static RResult< void > DeserializeHeader(const void *buffer, std::uint64_t bufSize, ROOT::Internal::RNTupleDescriptorBuilder &descBuilder)
A page as being stored on disk, that is packed and compressed.
Definition RCluster.hxx:41
Base class for a sink with a physical storage backend.
void UpdateSchema(const ROOT::Internal::RNTupleModelChangeset &changeset, ROOT::NTupleSize_t firstEntry) override
Incorporate incremental changes to the model into the ntuple descriptor.
void EnableDefaultMetrics(const std::string &prefix)
Enables the default set of metrics provided by RPageSink.
Reference to a page stored in the page pool.
Storage provider that write ntuple pages into a file.
void CommitBatchOfPages(CommitBatch &batch, std::vector< RNTupleLocator > &locators)
Subroutine of CommitSealedPageVImpl, used to perform a vector write of the (multi-)range of pages con...
RPageSinkFile(std::string_view ntupleName, const ROOT::RNTupleWriteOptions &options)
std::unique_ptr< RPageSink > CloneAsHidden(std::string_view name, const ROOT::RNTupleWriteOptions &opts) const override
Creates a new sink with the same underlying storage as this but writing to a different RNTuple named ...
std::uint64_t StageClusterImpl() final
Returns the number of bytes written to storage (excluding metadata)
void InitImpl(unsigned char *serializedHeader, std::uint32_t length) final
RNTupleLocator CommitPageImpl(ColumnHandle_t columnHandle, const RPage &page) override
RNTupleLocator WriteSealedPage(const RPageStorage::RSealedPage &sealedPage, std::size_t bytesPacked)
We pass bytesPacked so that TFile::ls() reports a reasonable value for the compression ratio of the c...
RNTupleLocator CommitClusterGroupImpl(unsigned char *serializedPageList, std::uint32_t length) final
Returns the locator of the page list envelope of the given buffer that contains the serialized page l...
RNTupleLocator CommitSealedPageImpl(ROOT::DescriptorId_t physicalColumnId, const RPageStorage::RSealedPage &sealedPage) final
RNTupleLink CommitDatasetImpl() final
std::unique_ptr< ROOT::Internal::RNTupleFileWriter > fWriter
void UpdateSchema(const ROOT::Internal::RNTupleModelChangeset &changeset, ROOT::NTupleSize_t firstEntry) final
Incorporate incremental changes to the model into the ntuple descriptor.
std::vector< RNTupleLocator > CommitSealedPageVImpl(std::span< RPageStorage::RSealedPageGroup > ranges, const std::vector< bool > &mask) final
Vector commit of preprocessed pages.
Storage provider that reads ntuple pages from a file.
std::unique_ptr< ROOT::Internal::RCluster > PrepareSingleCluster(const ROOT::Internal::RCluster::RKey &clusterKey, std::vector< RRawFile::RIOVec > &readRequests)
Helper function for LoadClusters: it prepares the memory buffer (page map) and the read requests for ...
std::unique_ptr< RPageSource > OpenWithDifferentAnchor(const ROOT::Internal::RNTupleLink &anchorLink, const ROOT::RNTupleReadOptions &options={}) final
Creates a new PageSource using the same underlying file as this but referring to a different RNTuple,...
RPageRef LoadPageImpl(ColumnHandle_t columnHandle, const RClusterInfo &clusterInfo, ROOT::NTupleSize_t idxInCluster) final
static std::unique_ptr< RPageSourceFile > CreateFromAnchor(const RNTuple &anchor, const ROOT::RNTupleReadOptions &options=ROOT::RNTupleReadOptions())
Used from the RNTuple class to build a datasource if the anchor is already available.
ROOT::RNTupleDescriptor AttachImpl(RNTupleSerializer::EDescriptorDeserializeMode mode) final
LoadStructureImpl() has been called before AttachImpl() is called
std::vector< std::unique_ptr< ROOT::Internal::RCluster > > LoadClusters(std::span< ROOT::Internal::RCluster::RKey > clusterKeys) final
Populates all the pages of the given cluster ids and columns; it is possible that some columns do not...
RPageSourceFile(std::string_view ntupleName, const ROOT::RNTupleReadOptions &options)
std::unique_ptr< RPageSource > CloneImpl() const final
The cloned page source creates a new raw file and reader and opens its own file descriptor to the dat...
void LoadStreamerInfo() final
Forces the loading of ROOT StreamerInfo from the underlying file.
std::unique_ptr< RRawFile > fFile
An RRawFile is used to request the necessary byte ranges from a local or a remote file.
ROOT::Internal::RMiniFileReader fReader
Takes the fFile to read ntuple blobs from it.
void LoadSealedPage(ROOT::DescriptorId_t physicalColumnId, RNTupleLocalIndex localIndex, RSealedPage &sealedPage) final
Read the packed and compressed bytes of a page into the memory buffer provided by sealedPage.
Abstract interface to read data from an ntuple.
void EnableDefaultMetrics(const std::string &prefix)
Enables the default set of metrics provided by RPageSource.
Stores information about the cluster in which this page resides.
Definition RPage.hxx:53
A page is a slice of a column that is mapped into memory.
Definition RPage.hxx:44
static const void * GetPageZeroBuffer()
Return a pointer to the page zero buffer used if there is no on-disk data for a particular deferred c...
Definition RPage.cxx:23
The RRawFileTFile wraps an open TFile, but does not take ownership.
The RRawFile provides read-only access to local and remote files.
Definition RRawFile.hxx:43
static std::unique_ptr< RRawFile > Create(std::string_view url, ROptions options=ROptions())
Factory method that returns a suitable concrete implementation according to the transport in the url.
Definition RRawFile.cxx:64
The field for a class with dictionary.
Definition RField.hxx:138
Base class for all ROOT issued exceptions.
Definition RError.hxx:79
A field translates read and write calls from/to underlying columns to/from tree values.
The on-storage metadata of an RNTuple.
Addresses a column element or field item relative to a particular cluster, instead of a global NTuple...
Generic information about the physical location of data.
Common user-tunable settings for reading RNTuples.
Common user-tunable settings for storing RNTuples.
std::uint64_t GetMaxKeySize() const
Representation of an RNTuple data set in a ROOT file.
Definition RNTuple.hxx:68
const_iterator begin() const
const_iterator end() const
The field for a class using ROOT standard streaming.
Definition RField.hxx:238
TClass instances represent classes, structs and namespaces in the ROOT type system.
Definition TClass.h:84
TVirtualStreamerInfo * GetStreamerInfo(Int_t version=0, Bool_t isTransient=kFALSE) const
returns a pointer to the TVirtualStreamerInfo object for version If the object does not exist,...
Definition TClass.cxx:4657
Describe directory structure in memory.
Definition TDirectory.h:45
const char * GetName() const override
Returns name of object.
Definition TNamed.h:49
std::uint64_t DescriptorId_t
Distinguishes elements of the same type within a descriptor, e.g. different fields.
std::uint64_t NTupleSize_t
Integer type long enough to hold the maximum number of entries in a column.
The identifiers that specifies the content of a (partial) cluster.
Definition RCluster.hxx:152
The incremental changes to a RNTupleModel
On-disk pages within a page source are identified by the column and page number.
Definition RCluster.hxx:51
Summarizes cluster-level information that is necessary to load a certain page.
A sealed page contains the bytes of a page as written to storage (packed & compressed).
Used for vector reads from multiple offsets into multiple buffers.
Definition RRawFile.hxx:61
Information about a single page in the context of a cluster's page range.