#include <unordered_map>
// RPageStorage::RSealedPage::ChecksumIfEnabled() -- compute the xxhash3 of the page payload
auto charBuf = reinterpret_cast<const unsigned char *>(fBuffer);
auto checksumBuf = const_cast<unsigned char *>(charBuf) + GetDataSize();
std::uint64_t xxhash3;

// VerifyChecksumIfEnabled() -- report a mismatch as an error
return R__FAIL("page checksum verification failed, data corruption detected");

// GetChecksum() -- read back the trailing 8-byte checksum
return R__FAIL("invalid attempt to extract non-existing page checksum");

assert(fBufferSize >= kNBytesPageChecksum);
std::uint64_t checksum;
reinterpret_cast<const unsigned char *>(fBuffer) + fBufferSize - kNBytesPageChecksum, checksum);
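As a hedged usage sketch (not part of the listing), calling code might exercise the RSealedPage checksum API excerpted above like this; the function and variable names other than the documented RSealedPage methods are illustrative only, and the relevant RNTuple headers are assumed to be included.

void InspectSealedPage(const ROOT::Experimental::Internal::RPageStorage::RSealedPage &sealedPage)
{
   // Throws via RException if a checksum is present and does not match the page data.
   sealedPage.VerifyChecksumIfEnabled().ThrowOnError();
   if (sealedPage.GetHasChecksum()) {
      // The checksum is the trailing 8-byte little-endian xxhash3 of the page payload.
      const std::uint64_t checksum = sealedPage.GetChecksum().Unwrap();
      (void)checksum;
   }
}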
// RPageSource::RActivePhysicalColumns::Insert() -- reference-count an active physical column
for (unsigned i = 0; i < fIDs.size(); ++i) {
   if (fIDs[i] == physicalColumnID) {
// ...
fIDs.emplace_back(physicalColumnID);
fRefCounters.emplace_back(1);

// Erase() -- drop the column once its reference count reaches zero
for (unsigned i = 0; i < fIDs.size(); ++i) {
   if (fIDs[i] == physicalColumnID) {
      if (--fRefCounters[i] == 0) {
         fIDs.erase(fIDs.begin() + i);
         fRefCounters.erase(fRefCounters.begin() + i);

// ToColumnSet() -- copy the active IDs into an RCluster::ColumnSet_t
for (const auto &id : fIDs)
// RPageSource::Create() -- guess the concrete page source from the location
std::unique_ptr<ROOT::Experimental::Internal::RPageSource>
// ...
if (ntupleName.empty()) {
if (location.empty()) {
if (location.find("daos://") == 0)
   return std::make_unique<RPageSourceDaos>(ntupleName, location, options);
// ...
return std::make_unique<RPageSourceFile>(ntupleName, location, options);
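A hedged illustration of the factory above (the ntuple name and file name are placeholders): opening a page source for reading and attaching it.

auto source = ROOT::Experimental::Internal::RPageSource::Create("ntpl", "data.root");
source->Attach();                         // deserializes header and footer
const auto nEntries = source->GetNEntries();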
// AddColumn() / DropColumn()
fActivePhysicalColumns.Insert(physicalId);
fActivePhysicalColumns.Erase(columnHandle.fPhysicalId);

// LoadStructure() / Attach()
fHasStructure = true;
GetExclDescriptorGuard().MoveIn(AttachImpl());

// Clone()
auto clone = CloneImpl();
clone->GetExclDescriptorGuard().MoveIn(std::move(*GetSharedDescriptorGuard()->Clone()));
clone->fHasStructure = true;
clone->fIsAttached = true;

// GetNEntries() / GetNElements()
return GetSharedDescriptorGuard()->GetNEntries();
return GetSharedDescriptorGuard()->GetNElements(columnHandle.fPhysicalId);
// RPageSource::UnzipCluster() delegates to UnzipClusterImpl()
UnzipClusterImpl(cluster);

// RPageSource::UnzipClusterImpl() -- parallel decompression and unpacking of a cluster's pages
const auto clusterId = cluster->GetId();
auto descriptorGuard = GetSharedDescriptorGuard();
const auto &clusterDescriptor = descriptorGuard->GetClusterDescriptor(clusterId);

std::vector<std::unique_ptr<RColumnElementBase>> allElements;
std::atomic<bool> foundChecksumFailure{false};

for (const auto columnId : columnsInCluster) {
   const auto &columnDesc = descriptorGuard->GetColumnDescriptor(columnId);
   const auto &pageRange = clusterDescriptor.GetPageRange(columnId);
   std::uint64_t pageNo = 0;
   std::uint64_t firstInPage = 0;
   for (const auto &pi : pageRange.fPageInfos) {
      sealedPage.SetBufferSize(pi.fLocator.fBytesOnStorage + pi.fHasChecksum * kNBytesPageChecksum);
      sealedPage.SetBuffer(onDiskPage->GetAddress());
      auto taskFunc = [this, columnId, clusterId, firstInPage, sealedPage, element = allElements.back().get(),
                       &foundChecksumFailure,
                       indexOffset = clusterDescriptor.GetColumnRange(columnId).fFirstElementIndex]() {
         auto rv = UnsealPage(sealedPage, *element, columnId);
         // on failure: record the problem and let the remaining tasks finish
         foundChecksumFailure = true;
         auto newPage = rv.Unwrap();
         fCounters->fSzUnzip.Add(element->GetSize() * sealedPage.GetNElements());
         fPagePool.PreloadPage(std::move(newPage));
      fTaskScheduler->AddTask(taskFunc);
      firstInPage += pi.fNElements;

fTaskScheduler->Wait();
if (foundChecksumFailure) {
   throw RException(R__FAIL("page checksum verification failed, data corruption detected"));
// RPageSource::PrepareLoadCluster() -- iterate the page ranges of the requested columns
auto descriptorGuard = GetSharedDescriptorGuard();
const auto &clusterDesc = descriptorGuard->GetClusterDescriptor(clusterKey.fClusterId);
// ...
if (clusterDesc.GetColumnRange(physicalColumnId).fIsSuppressed)
// ...
const auto &pageRange = clusterDesc.GetPageRange(physicalColumnId);
for (const auto &pageInfo : pageRange.fPageInfos) {
   perPageFunc(physicalColumnId, pageNo, pageInfo);
// RPageSource::LoadPage(columnHandle, globalIndex) -- try the page pool, then locate cluster and page
auto cachedPageRef = fPagePool.GetPage(columnId, globalIndex);
if (!cachedPageRef.Get().IsNull())
   return cachedPageRef;

std::uint64_t idxInCluster;
auto descriptorGuard = GetSharedDescriptorGuard();
clusterInfo.fClusterId = descriptorGuard->FindClusterId(columnId, globalIndex);
// ...
throw RException(R__FAIL("entry with index " + std::to_string(globalIndex) + " out of bounds"));

const auto &clusterDescriptor = descriptorGuard->GetClusterDescriptor(clusterInfo.fClusterId);
const auto &columnRange = clusterDescriptor.GetColumnRange(columnId);
if (columnRange.fIsSuppressed)
// ...
clusterInfo.fPageInfo = clusterDescriptor.GetPageRange(columnId).Find(idxInCluster);
return LoadPageImpl(columnHandle, clusterInfo, idxInCluster);

// RPageSource::LoadPage(columnHandle, clusterIndex) -- same lookup with a cluster-local index
const auto idxInCluster = clusterIndex.GetIndex();
auto cachedPageRef = fPagePool.GetPage(columnId, clusterIndex);
if (!cachedPageRef.Get().IsNull())
   return cachedPageRef;

auto descriptorGuard = GetSharedDescriptorGuard();
const auto &clusterDescriptor = descriptorGuard->GetClusterDescriptor(clusterId);
const auto &columnRange = clusterDescriptor.GetColumnRange(columnId);
if (columnRange.fIsSuppressed)
// ...
clusterInfo.fPageInfo = clusterDescriptor.GetPageRange(columnId).Find(idxInCluster);
return LoadPageImpl(columnHandle, clusterInfo, idxInCluster);
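For the global-index overload above, the cluster-local index handed to RPageRange::Find() follows from the documented meaning of RColumnRange::fFirstElementIndex (the global index of the column's first element in the cluster). A hedged one-line sketch of that translation:

// idxInCluster as consumed by clusterDescriptor.GetPageRange(columnId).Find(...)
std::uint64_t idxInCluster = globalIndex - columnRange.fFirstElementIndex;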
// RPageSource::EnableDefaultMetrics() -- low-level I/O counters followed by derived metrics
fCounters = std::make_unique<RCounters>(RCounters{
   // ... "volume read from storage (required)"),
   // ... "volume read from storage (overhead)"),
   // ... "number of partial clusters preloaded from storage"),
   // ... "number of pages unzipped and decoded"),
   // ... "wall clock time spent decompressing"),
   // ... "CPU time spent reading"),
   // ... "CPU time spent decompressing"),

   "bwRead", "MB/s", "bandwidth compressed bytes read per second", fMetrics,
   if (const auto szReadPayload = metrics.GetLocalCounter("szReadPayload")) {
      if (const auto szReadOverhead = metrics.GetLocalCounter("szReadOverhead")) {
         if (const auto timeWallRead = metrics.GetLocalCounter("timeWallRead")) {
            if (auto walltime = timeWallRead->GetValueAsInt()) {
               double payload = szReadPayload->GetValueAsInt();
               double overhead = szReadOverhead->GetValueAsInt();
               return {true, (1000. * (payload + overhead) / walltime)};

   "bwReadUnzip", "MB/s", "bandwidth uncompressed bytes read per second", fMetrics,
   if (const auto timeWallRead = metrics.GetLocalCounter("timeWallRead")) {
      if (auto walltime = timeWallRead->GetValueAsInt()) {
         return {true, 1000. * unzip / walltime};

   "bwUnzip", "MB/s", "decompression bandwidth of uncompressed bytes per second", fMetrics,
   if (const auto timeWallUnzip = metrics.GetLocalCounter("timeWallUnzip")) {
      if (auto walltime = timeWallUnzip->GetValueAsInt()) {
         return {true, 1000. * unzip / walltime};

   "rtReadEfficiency", "", "ratio of payload over all bytes read", fMetrics,
   if (const auto szReadPayload = metrics.GetLocalCounter("szReadPayload")) {
      if (const auto szReadOverhead = metrics.GetLocalCounter("szReadOverhead")) {
         if (auto payload = szReadPayload->GetValueAsInt()) {
            return {true, 1. / (1. + (1. * szReadOverhead->GetValueAsInt()) / payload)};

   "rtCompression", "", "ratio of compressed bytes / uncompressed bytes", fMetrics,
   if (const auto szReadPayload = metrics.GetLocalCounter("szReadPayload")) {
      if (auto unzip = szUnzip->GetValueAsInt()) {
         return {true, (1. * szReadPayload->GetValueAsInt()) / unzip};
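A quick unit check on the derived throughput formulas above, under the assumption (not stated in the excerpt) that the wall-clock counters are kept in nanoseconds and that MB means 10^6 bytes:

// bytes / nanosecond = 10^9 bytes/s = 10^3 MB/s, hence the constant factor of 1000:
// bwRead      = 1000. * (payload + overhead) / walltime   ->  MB/s
// bwReadUnzip = 1000. * unzip / walltime                  ->  MB/s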
// RPageSource::UnsealPage() -- unstream a sealed page into an in-memory page
return UnsealPage(sealedPage, element, physicalColumnId, *fPageAllocator);
// ...
memcpy(page.GetBuffer(), sealedPage.GetBuffer(), bytesPacked);
// ...
page = std::move(tmp);
// TryEvict(targetAvailableSize, pageSizeLimit) -- flush columns, largest write pages first,
// until enough of the page buffer budget is free
   std::size_t pageSizeLimit)
if (fMaxAllocatedBytes - fCurrentAllocatedBytes >= targetAvailableSize)

auto itr = fColumnsSortedByPageSize.begin();
while (itr != fColumnsSortedByPageSize.end()) {
   if (itr->fCurrentPageSize <= pageSizeLimit)
   if (itr->fCurrentPageSize == itr->fInitialPageSize) {
   auto itrFlush = itr++;
   if (itr != fColumnsSortedByPageSize.end())
   itrFlush->fColumn->Flush();
   if (fMaxAllocatedBytes - fCurrentAllocatedBytes >= targetAvailableSize)
   if (next.fColumn == nullptr)
   itr = fColumnsSortedByPageSize.find(next);

// TryUpdate(column, newWritePageSize) -- register the new write page size against the budget
auto itr = fColumnsSortedByPageSize.find(key);
if (itr == fColumnsSortedByPageSize.end()) {
   if (!TryEvict(newWritePageSize, 0))
   fColumnsSortedByPageSize.insert({&column, newWritePageSize, newWritePageSize});
   fCurrentAllocatedBytes += newWritePageSize;

assert(newWritePageSize >= elem.fInitialPageSize);
if (newWritePageSize == elem.fCurrentPageSize)
fColumnsSortedByPageSize.erase(itr);

if (newWritePageSize < elem.fCurrentPageSize) {
   // shrinking releases budget immediately
   fCurrentAllocatedBytes -= elem.fCurrentPageSize - newWritePageSize;
   elem.fCurrentPageSize = newWritePageSize;
   fColumnsSortedByPageSize.insert(elem);

// growing: evict other columns first, give up if the budget cannot be freed
const auto diffBytes = newWritePageSize - elem.fCurrentPageSize;
if (!TryEvict(diffBytes, elem.fCurrentPageSize)) {
   fColumnsSortedByPageSize.insert(elem);
fCurrentAllocatedBytes += diffBytes;
elem.fCurrentPageSize = newWritePageSize;
fColumnsSortedByPageSize.insert(elem);
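A hedged worked example of the bookkeeping above; all byte figures are invented for illustration.

// budget:   fMaxAllocatedBytes     = 64 MiB
// in use:   fCurrentAllocatedBytes = 62 MiB  ->  2 MiB of headroom
// request:  grow one column's write page from 1 MiB to 4 MiB  ->  diffBytes = 3 MiB
// TryEvict(3 MiB, 1 MiB) flushes other columns, largest current write page first, skipping
// entries whose page size is <= 1 MiB, until at least 3 MiB of headroom is available;
// if that cannot be reached, TryUpdate() re-inserts the unchanged entry and returns false.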
// RPageSink constructor
: RPageStorage(name), fOptions(options.Clone()), fWritePageMemoryManager(options.GetPageBufferBudget())
// SealPage(config) -- pack, compress, and optionally checksum a single page
assert(config.fPage);
// ...
unsigned char *pageBuf = reinterpret_cast<unsigned char *>(config.fPage->GetBuffer());
bool isAdoptedBuffer = true;
// ...
auto nBytesChecksum = config.fWriteChecksum * kNBytesPageChecksum;
// if the element is not mappable, pack into a newly allocated scratch buffer
pageBuf = new unsigned char[nBytesPacked];
isAdoptedBuffer = false;
// ...
auto nBytesZipped = nBytesPacked;
// ...
if (!isAdoptedBuffer)
pageBuf = reinterpret_cast<unsigned char *>(config.fBuffer);
isAdoptedBuffer = true;
// ...
sealedPage.ChecksumIfEnabled();

// SealPage(page, element) -- size the scratch buffer, then delegate to SealPage(config)
const auto nBytes = page.GetNBytes() + GetWriteOptions().GetEnablePageChecksums() * kNBytesPageChecksum;
if (fSealPageBuffer.size() < nBytes)
   fSealPageBuffer.resize(nBytes);
config.fPage = &page;
config.fWriteChecksum = GetWriteOptions().GetEnablePageChecksums();
config.fBuffer = fSealPageBuffer.data();
return SealPage(config);
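The scratch-buffer sizing above is simple arithmetic. Assuming kNBytesPageChecksum is 8 (the sealed-page checksum is documented below as an 8-byte little-endian xxhash3), a 100 000-byte page sealed with checksums enabled needs

   nBytes = page.GetNBytes() + 1 * kNBytesPageChecksum = 100000 + 8 = 100008 bytes,

and exactly page.GetNBytes() when checksums are disabled, since GetEnablePageChecksums() then contributes a factor of 0.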
// RPageSink::CommitDataset() -- run the registered callbacks before finalizing
for (const auto &cb : fOnDatasetCommitCallbacks)

// RPageSink::ReservePage() -- respect the write page memory budget
const auto nBytes = elementSize * nElements;
if (!fWritePageMemoryManager.TryUpdate(*columnHandle.fColumn, nBytes))
return fPageAllocator->NewPage(columnHandle.fPhysicalId, elementSize, nElements);
// RPageSink::Create() -- guess the concrete page sink from the location
std::unique_ptr<ROOT::Experimental::Internal::RPageSink>
// ...
if (ntupleName.empty()) {
if (location.empty()) {
if (location.find("daos://") == 0) {
   return std::make_unique<RPageSinkDaos>(ntupleName, location, options);
// ...
return std::make_unique<RPageSinkFile>(ntupleName, location, options);
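A matching hedged sketch for the write side (again with placeholder names): a location starting with "daos://" selects RPageSinkDaos, anything else falls back to RPageSinkFile.

auto fileSink = ROOT::Experimental::Internal::RPageSink::Create("ntpl", "data.root");
auto daosSink = ROOT::Experimental::Internal::RPageSink::Create("ntpl", "daos://pool/container");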
// RPagePersistentSink::AddColumn() -- assign the next physical column id
auto columnId = fDescriptorBuilder.GetDescriptor().GetNPhysicalColumns();
// ...
fDescriptorBuilder.AddColumn(columnBuilder.MakeDescriptor().Unwrap());

// RPagePersistentSink::UpdateSchema() -- incorporate incremental model changes into the descriptor
const auto &descriptor = fDescriptorBuilder.GetDescriptor();
// ...
if (descriptor.GetNLogicalColumns() > descriptor.GetNPhysicalColumns()) {
   auto getNColumns = [](const RFieldBase &f) -> std::size_t {
      const auto &reps = f.GetColumnRepresentatives();
      // ...
      return reps.size() * reps[0].size();
   std::uint32_t nNewPhysicalColumns = 0;
   nNewPhysicalColumns += getNColumns(*f);
   for (const auto &descendant : *f)
      nNewPhysicalColumns += getNColumns(descendant);
   fDescriptorBuilder.ShiftAliasColumns(nNewPhysicalColumns);

// addField -- register a field and link it to its parent
auto fieldId = descriptor.GetNFields();
fDescriptorBuilder.AddFieldLink(f.GetParent()->GetOnDiskId(), fieldId);
f.SetOnDiskId(fieldId);

// addProjectedField -- register the projection and alias columns mirroring the source columns
auto fieldId = descriptor.GetNFields();
fDescriptorBuilder.AddFieldLink(f.GetParent()->GetOnDiskId(), fieldId);
fDescriptorBuilder.AddFieldProjection(sourceFieldId, fieldId);
f.SetOnDiskId(fieldId);
for (const auto &source : descriptor.GetColumnIterable(sourceFieldId)) {
   auto targetId = descriptor.GetNLogicalColumns();
   // ...
      .ValueRange(source.GetValueRange())
      .Type(source.GetType())
      .Index(source.GetIndex())
      .RepresentationIndex(source.GetRepresentationIndex());
   fDescriptorBuilder.AddColumn(columnBuilder.MakeDescriptor().Unwrap());

R__ASSERT(firstEntry >= fPrevClusterNEntries);
const auto nColumnsBeforeUpdate = descriptor.GetNPhysicalColumns();
for (auto &descendant : *f)
   addField(descendant);
addProjectedField(*f);
for (auto &descendant : *f)
   addProjectedField(descendant);

// open a column range and a page range for every newly added physical column
const auto nColumns = descriptor.GetNPhysicalColumns();
for (DescriptorId_t i = nColumnsBeforeUpdate; i < nColumns; ++i) {
   columnRange.fFirstElementIndex = descriptor.GetColumnDescriptor(i).GetFirstElementIndex();
   // ...
   fOpenColumnRanges.emplace_back(columnRange);
   // ...
   fOpenPageRanges.emplace_back(std::move(pageRange));

if (fSerializationContext.GetHeaderSize() > 0)
   fSerializationContext.MapSchema(descriptor, true);
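The column counting in getNColumns above is a small product: a field with, say, two column representations of three columns each contributes reps.size() * reps[0].size() = 2 * 3 = 6 physical columns, and nNewPhysicalColumns sums this over each newly added field and all of its descendants before the alias columns are shifted by that amount.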
throw RException(R__FAIL("ROOT bug: unexpected type extra info in UpdateExtraTypeInfo()"));
// sink initialization from a model: seed the descriptor, register fields, serialize the header
fDescriptorBuilder.SetNTuple(fNTupleName, model.GetDescription());
const auto &descriptor = fDescriptorBuilder.GetDescriptor();
// ...
fieldZero.SetOnDiskId(0);
projectedFields.GetFieldZero().SetOnDiskId(0);
// ...
for (auto f : fieldZero.GetSubFields())
   initialChangeset.fAddedFields.emplace_back(f);
for (auto f : projectedFields.GetFieldZero().GetSubFields())
   initialChangeset.fAddedProjectedFields.emplace_back(f);
UpdateSchema(initialChangeset, 0U);

auto buffer = std::make_unique<unsigned char[]>(fSerializationContext.GetHeaderSize());
// ...
InitImpl(buffer.get(), fSerializationContext.GetHeaderSize());
// ...
fDescriptorBuilder.BeginHeaderExtension();
// replaying a committed cluster: record each column's element range and page range in the builder
clusterBuilder.ClusterId(fDescriptorBuilder.GetDescriptor().GetNActiveClusters())
// ...
for (unsigned int i = 0; i < fOpenColumnRanges.size(); ++i) {
   R__ASSERT(fOpenColumnRanges[i].fPhysicalColumnId == i);
   const auto &columnRange = cluster.GetColumnRange(i);
   R__ASSERT(columnRange.fPhysicalColumnId == i);
   const auto &pageRange = cluster.GetPageRange(i);
   R__ASSERT(pageRange.fPhysicalColumnId == i);
   clusterBuilder.CommitColumnRange(i, fOpenColumnRanges[i].fFirstElementIndex, columnRange.fCompressionSettings,
   // ...
   fOpenColumnRanges[i].fFirstElementIndex += columnRange.fNElements;
fDescriptorBuilder.AddCluster(clusterBuilder.MoveDescriptor().Unwrap());
fPrevClusterNEntries += nEntries;

// RPagePersistentSink::CommitSuppressedColumn()
fOpenColumnRanges.at(columnHandle.fPhysicalId).fIsSuppressed = true;

// RPagePersistentSink::CommitPage()
pageInfo.fLocator = CommitPageImpl(columnHandle, page);
pageInfo.fHasChecksum = GetWriteOptions().GetEnablePageChecksums();
fOpenPageRanges.at(columnHandle.fPhysicalId).fPageInfos.emplace_back(pageInfo);

// RPagePersistentSink::CommitSealedPage()
fOpenColumnRanges.at(physicalColumnId).fNElements += sealedPage.GetNElements();
// ...
pageInfo.fLocator = CommitSealedPageImpl(physicalColumnId, sealedPage);
// ...
fOpenPageRanges.at(physicalColumnId).fPageInfos.emplace_back(pageInfo);
// RPagePersistentSink::CommitSealedPageVImpl() -- default implementation: write every masked-in page
std::vector<ROOT::Experimental::RNTupleLocator>
   std::span<RPageStorage::RSealedPageGroup> ranges, const std::vector<bool> &mask)
std::vector<ROOT::Experimental::RNTupleLocator> locators;
locators.reserve(mask.size());
// ...
for (auto &range : ranges) {
   for (auto sealedPageIt = range.fFirst; sealedPageIt != range.fLast; ++sealedPageIt) {
      // ...
      locators.push_back(CommitSealedPageImpl(range.fPhysicalColumnId, *sealedPageIt));
locators.shrink_to_fit();

// RPagePersistentSink::CommitSealedPageV() -- deduplicate identical pages by checksum before writing
   std::span<RPageStorage::RSealedPageGroup> ranges)
struct RSealedPageLink {
   // ...
   std::size_t fLocatorIdx = 0;
std::vector<bool> mask;
// entry i holds the locator index to use for page i
std::vector<std::size_t> locatorIndexes;
// maps a page checksum to the first sealed page seen with that checksum
std::unordered_map<std::uint64_t, RSealedPageLink> originalPages;
std::size_t iLocator = 0;
for (auto &range : ranges) {
   const auto rangeSize = std::distance(range.fFirst, range.fLast);
   mask.reserve(mask.size() + rangeSize);
   locatorIndexes.reserve(locatorIndexes.size() + rangeSize);
   for (auto sealedPageIt = range.fFirst; sealedPageIt != range.fLast; ++sealedPageIt) {
      if (!fFeatures.fCanMergePages || !sealedPageIt->GetHasChecksum()) {
         mask.emplace_back(true);
         locatorIndexes.emplace_back(iLocator++);
         // ...
      const auto chk = sealedPageIt->GetChecksum().Unwrap();
      auto itr = originalPages.find(chk);
      if (itr == originalPages.end()) {
         originalPages.insert({chk, {&(*sealedPageIt), iLocator}});
         mask.emplace_back(true);
         locatorIndexes.emplace_back(iLocator++);
         // ...
      const auto *p = itr->second.fSealedPage;
      // equal checksums can still collide: compare sizes and bytes before treating pages as identical
      if (sealedPageIt->GetDataSize() != p->GetDataSize() ||
          memcmp(sealedPageIt->GetBuffer(), p->GetBuffer(), p->GetDataSize())) {
         mask.emplace_back(true);
         locatorIndexes.emplace_back(iLocator++);
         // ...
      mask.emplace_back(false);
      locatorIndexes.emplace_back(itr->second.fLocatorIdx);
mask.shrink_to_fit();
locatorIndexes.shrink_to_fit();

auto locators = CommitSealedPageVImpl(ranges, mask);
// bookkeeping: update the open column and page ranges with the locators just obtained
for (auto &range : ranges) {
   for (auto sealedPageIt = range.fFirst; sealedPageIt != range.fLast; ++sealedPageIt) {
      fOpenColumnRanges.at(range.fPhysicalColumnId).fNElements += sealedPageIt->GetNElements();
      // ...
      pageInfo.fNElements = sealedPageIt->GetNElements();
      pageInfo.fLocator = locators[locatorIndexes[i++]];
      pageInfo.fHasChecksum = sealedPageIt->GetHasChecksum();
      fOpenPageRanges.at(range.fPhysicalColumnId).fPageInfos.emplace_back(pageInfo);
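The checksum-based deduplication above can be distilled into a standalone sketch (the types and names here are simplified stand-ins, not the RNTuple classes): pages are keyed by their 64-bit checksum, a byte-wise comparison guards against hash collisions, and duplicates reuse the locator index assigned to the first occurrence.

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <unordered_map>
#include <vector>

struct PageRef {
   const void *fBuffer;
   std::size_t fSize;
   std::uint64_t fChecksum;
};

// Returns, for every page, the index of the locator it should use; mask[i] tells the
// caller whether page i actually has to be written.
std::vector<std::size_t> DeduplicateByChecksum(const std::vector<PageRef> &pages, std::vector<bool> &mask)
{
   struct Link {
      std::size_t fPageIdx;    // first page seen with this checksum
      std::size_t fLocatorIdx; // locator slot assigned to that page
   };
   std::unordered_map<std::uint64_t, Link> originalPages;
   std::vector<std::size_t> locatorIndexes;
   std::size_t iLocator = 0;
   for (std::size_t i = 0; i < pages.size(); ++i) {
      const auto itr = originalPages.find(pages[i].fChecksum);
      if (itr != originalPages.end()) {
         const auto &orig = pages[itr->second.fPageIdx];
         // equal checksums may still collide, so confirm byte-wise equality
         if (orig.fSize == pages[i].fSize && std::memcmp(orig.fBuffer, pages[i].fBuffer, orig.fSize) == 0) {
            mask.push_back(false);                             // skip the duplicate write
            locatorIndexes.push_back(itr->second.fLocatorIdx); // point at the original's locator
            continue;
         }
      }
      originalPages.insert({pages[i].fChecksum, {i, iLocator}}); // no-op if the checksum is already known
      mask.push_back(true);
      locatorIndexes.push_back(iLocator++);
   }
   return locatorIndexes;
}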
// RPagePersistentSink::StageCluster() -- move the open page ranges into a staged cluster
for (unsigned int i = 0; i < fOpenColumnRanges.size(); ++i) {
   RStagedCluster::RColumnInfo columnInfo;
   if (fOpenColumnRanges[i].fIsSuppressed) {
      assert(fOpenPageRanges[i].fPageInfos.empty());
      columnInfo.fPageRange.fPhysicalColumnId = i;
      columnInfo.fIsSuppressed = true;
      // ...
      fOpenColumnRanges[i].fNElements = 0;
      fOpenColumnRanges[i].fIsSuppressed = false;
   std::swap(columnInfo.fPageRange, fOpenPageRanges[i]);
   fOpenPageRanges[i].fPhysicalColumnId = i;
   // ...
   columnInfo.fNElements = fOpenColumnRanges[i].fNElements;
   fOpenColumnRanges[i].fNElements = 0;
   stagedCluster.fColumnInfos.push_back(std::move(columnInfo));
return stagedCluster;

// RPagePersistentSink::CommitStagedClusters() -- logically append staged clusters to the descriptor
for (const auto &cluster : clusters) {
   clusterBuilder.ClusterId(fDescriptorBuilder.GetDescriptor().GetNActiveClusters())
      .FirstEntryIndex(fPrevClusterNEntries)
   // ...
   for (const auto &columnInfo : cluster.fColumnInfos) {
      // ...
      if (columnInfo.fIsSuppressed) {
         assert(columnInfo.fPageRange.fPageInfos.empty());
         // ...
      clusterBuilder.CommitColumnRange(colId, fOpenColumnRanges[colId].fFirstElementIndex,
                                       fOpenColumnRanges[colId].fCompressionSettings, columnInfo.fPageRange);
      fOpenColumnRanges[colId].fFirstElementIndex += columnInfo.fNElements;
   // second pass: update the open ranges of suppressed columns from the committed column ranges
   for (const auto &columnInfo : cluster.fColumnInfos) {
      if (!columnInfo.fIsSuppressed)
      // ...
      const auto &columnRangeFromDesc = clusterBuilder.GetColumnRange(colId);
      // ...
         columnRangeFromDesc.fFirstElementIndex + columnRangeFromDesc.fNElements;
   fDescriptorBuilder.AddCluster(clusterBuilder.MoveDescriptor().Unwrap());
   fPrevClusterNEntries += cluster.fNEntries;
// RPagePersistentSink::CommitClusterGroup() -- serialize the page list and register the cluster group
const auto &descriptor = fDescriptorBuilder.GetDescriptor();
// ...
const auto nClusters = descriptor.GetNActiveClusters();
std::vector<DescriptorId_t> physClusterIDs;
for (auto i = fNextClusterInGroup; i < nClusters; ++i) {
   physClusterIDs.emplace_back(fSerializationContext.MapClusterId(i));
// ...
auto bufPageList = std::make_unique<unsigned char[]>(szPageList);
// ...
const auto clusterGroupId = descriptor.GetNClusterGroups();
const auto locator = CommitClusterGroupImpl(bufPageList.get(), szPageList);
// ...
if (fNextClusterInGroup == nClusters) {
// ...
const auto &firstClusterDesc = descriptor.GetClusterDescriptor(fNextClusterInGroup);
const auto &lastClusterDesc = descriptor.GetClusterDescriptor(nClusters - 1);
cgBuilder.MinEntry(firstClusterDesc.GetFirstEntryIndex())
   .EntrySpan(lastClusterDesc.GetFirstEntryIndex() + lastClusterDesc.GetNEntries() -
              firstClusterDesc.GetFirstEntryIndex())
   .NClusters(nClusters - fNextClusterInGroup);
std::vector<DescriptorId_t> clusterIds;
for (auto i = fNextClusterInGroup; i < nClusters; ++i) {
   clusterIds.emplace_back(i);
fDescriptorBuilder.AddClusterGroup(cgBuilder.MoveDescriptor().Unwrap());
fSerializationContext.MapClusterGroupId(clusterGroupId);
// ...
fNextClusterInGroup = nClusters;
// finalize the dataset: flush streamer infos into the descriptor, then serialize and write the footer
if (!fStreamerInfos.empty()) {
   // ...
   fDescriptorBuilder.AddExtraTypeInfo(extraInfoBuilder.MoveDescriptor().Unwrap());
const auto &descriptor = fDescriptorBuilder.GetDescriptor();
// ...
auto bufFooter = std::make_unique<unsigned char[]>(szFooter);
// ...
CommitDatasetImpl(bufFooter.get(), szFooter);

// RPagePersistentSink::EnableDefaultMetrics() -- counters for the write path
fCounters = std::make_unique<RCounters>(RCounters{
   // ... "number of pages committed to storage"),
   // ... "volume written for committed pages"),
   // ... "CPU time spent writing"),
   // ... "CPU time spent compressing")});
#define R__FORWARD_ERROR(res)
Short-hand to return an RResult<T> in an error state (i.e. after checking)
#define R__FAIL(msg)
Short-hand to return an RResult<T> in an error state; the RError is implicitly converted into RResult...
#define R__ASSERT(e)
Checks condition e and reports a fatal error if it's false.
A thread-safe integral performance counter.
A metric element that computes its floating point value from other counters.
std::int64_t GetValueAsInt() const override
A collection of Counter objects with a name, a unit, and a description.
const RNTuplePerfCounter * GetLocalCounter(std::string_view name) const
Searches counters registered in this object only. Returns nullptr if name is not found.
An either thread-safe or non thread safe counter for CPU ticks.
Record wall time and CPU time between construction and destruction.
A helper class for piece-wise construction of an RClusterDescriptor.
RResult< RClusterDescriptor > MoveDescriptor()
Move out the full cluster descriptor including page locations.
RClusterDescriptorBuilder & ClusterId(DescriptorId_t clusterId)
RClusterDescriptorBuilder & NEntries(std::uint64_t nEntries)
RClusterDescriptorBuilder & FirstEntryIndex(std::uint64_t firstEntryIndex)
const RClusterDescriptor::RColumnRange & GetColumnRange(DescriptorId_t physicalId)
RResult< void > MarkSuppressedColumnRange(DescriptorId_t physicalId)
Books the given column ID as being suppressed in this cluster.
RResult< void > CommitColumnRange(DescriptorId_t physicalId, std::uint64_t firstElementIndex, std::uint32_t compressionSettings, const RClusterDescriptor::RPageRange &pageRange)
RResult< void > CommitSuppressedColumnRanges(const RNTupleDescriptor &desc)
Sets the first element index and number of elements for all the suppressed column ranges.
A helper class for piece-wise construction of an RClusterGroupDescriptor.
RClusterGroupDescriptorBuilder & PageListLocator(const RNTupleLocator &pageListLocator)
void AddClusters(const std::vector< DescriptorId_t > &clusterIds)
RClusterGroupDescriptorBuilder & MinEntry(std::uint64_t minEntry)
RClusterGroupDescriptorBuilder & ClusterGroupId(DescriptorId_t clusterGroupId)
RClusterGroupDescriptorBuilder & EntrySpan(std::uint64_t entrySpan)
RClusterGroupDescriptorBuilder & NClusters(std::uint32_t nClusters)
RClusterGroupDescriptorBuilder & PageListLength(std::uint64_t pageListLength)
RResult< RClusterGroupDescriptor > MoveDescriptor()
An in-memory subset of the packed and compressed pages of a cluster.
const ColumnSet_t & GetAvailPhysicalColumns() const
const ROnDiskPage * GetOnDiskPage(const ROnDiskPage::Key &key) const
size_t GetNOnDiskPages() const
DescriptorId_t GetId() const
std::unordered_set< DescriptorId_t > ColumnSet_t
A helper class for piece-wise construction of an RColumnDescriptor.
RColumnDescriptorBuilder & PhysicalColumnId(DescriptorId_t physicalColumnId)
RColumnDescriptorBuilder & Type(EColumnType type)
RColumnDescriptorBuilder & SetSuppressedDeferred()
RColumnDescriptorBuilder & BitsOnStorage(std::uint16_t bitsOnStorage)
RColumnDescriptorBuilder & RepresentationIndex(std::uint16_t representationIndex)
RColumnDescriptorBuilder & FieldId(DescriptorId_t fieldId)
RColumnDescriptorBuilder & Index(std::uint32_t index)
RColumnDescriptorBuilder & FirstElementIndex(std::uint64_t firstElementIdx)
RResult< RColumnDescriptor > MakeDescriptor() const
Attempt to make a column descriptor.
RColumnDescriptorBuilder & LogicalColumnId(DescriptorId_t logicalColumnId)
RColumnDescriptorBuilder & ValueRange(double min, double max)
A column element encapsulates the translation between basic C++ types and their column representation...
std::size_t GetSize() const
virtual bool IsMappable() const
Derived, typed classes tell whether the on-storage layout is bitwise identical to the memory layout.
virtual void Pack(void *destination, const void *source, std::size_t count) const
If the on-storage layout and the in-memory layout differ, packing creates an on-disk page from an in-...
virtual void Unpack(void *destination, const void *source, std::size_t count) const
If the on-storage layout and the in-memory layout differ, unpacking creates a memory page from an on-...
static std::unique_ptr< RColumnElementBase > Generate(EColumnType type)
If CppT == void, use the default C++ type for the given column type.
std::size_t GetPackedSize(std::size_t nElements=1U) const
A column is a storage-backed array of a simple, fixed-size type, from which pages can be mapped into ...
RColumnElementBase * GetElement() const
DescriptorId_t GetOnDiskId() const
std::optional< std::pair< double, double > > GetValueRange() const
std::uint16_t GetRepresentationIndex() const
EColumnType GetType() const
std::uint32_t GetIndex() const
std::size_t GetWritePageCapacity() const
NTupleSize_t GetFirstElementIndex() const
std::uint16_t GetBitsOnStorage() const
static RFieldDescriptorBuilder FromField(const RFieldBase &field)
Make a new RFieldDescriptorBuilder based off a live NTuple field.
RResult< RFieldDescriptor > MakeDescriptor() const
Attempt to make a field descriptor.
RFieldDescriptorBuilder & FieldId(DescriptorId_t fieldId)
size_t Zip(const void *from, size_t nbytes, int compression, Writer_t fnWriter)
Returns the size of the compressed data.
static void Unzip(const void *from, size_t nbytes, size_t dataLen, void *to)
The nbytes parameter provides the size of the from buffer.
static std::uint32_t SerializeXxHash3(const unsigned char *data, std::uint64_t length, std::uint64_t &xxhash3, void *buffer)
Writes a XxHash-3 64bit checksum of the byte range given by data and length.
static RContext SerializeHeader(void *buffer, const RNTupleDescriptor &desc)
static std::string SerializeStreamerInfos(const StreamerInfoMap_t &infos)
static std::uint32_t SerializePageList(void *buffer, const RNTupleDescriptor &desc, std::span< DescriptorId_t > physClusterIDs, const RContext &context)
static RResult< StreamerInfoMap_t > DeserializeStreamerInfos(const std::string &extraTypeInfoContent)
static RResult< void > VerifyXxHash3(const unsigned char *data, std::uint64_t length, std::uint64_t &xxhash3)
Expects an xxhash3 checksum in the 8 bytes following data + length and verifies it.
static std::uint32_t DeserializeUInt64(const void *buffer, std::uint64_t &val)
static std::uint32_t SerializeFooter(void *buffer, const RNTupleDescriptor &desc, const RContext &context)
A memory region that contains packed and compressed pages.
void Register(const ROnDiskPage::Key &key, const ROnDiskPage &onDiskPage)
Inserts information about a page stored in fMemory.
A page as being stored on disk, that is packed and compressed.
Uses standard C++ memory allocation for the column data pages.
Abstract interface to allocate and release pages.
virtual RPage NewPage(ColumnId_t columnId, std::size_t elementSize, std::size_t nElements)=0
Reserves memory large enough to hold nElements of the given size.
ColumnHandle_t AddColumn(DescriptorId_t fieldId, RColumn &column) final
Register a new column.
RStagedCluster StageCluster(NTupleSize_t nNewEntries) final
Stage the current cluster and create a new one for the following data.
virtual void InitImpl(unsigned char *serializedHeader, std::uint32_t length)=0
~RPagePersistentSink() override
virtual std::vector< RNTupleLocator > CommitSealedPageVImpl(std::span< RPageStorage::RSealedPageGroup > ranges, const std::vector< bool > &mask)
Vector commit of preprocessed pages.
void InitFromDescriptor(const RNTupleDescriptor &descriptor)
Initialize sink based on an existing descriptor and fill into the descriptor builder.
void CommitPage(ColumnHandle_t columnHandle, const RPage &page) final
Write a page to the storage. The column must have been added before.
static std::unique_ptr< RPageSink > Create(std::string_view ntupleName, std::string_view location, const RNTupleWriteOptions &options=RNTupleWriteOptions())
Guess the concrete derived page source from the location.
RPagePersistentSink(std::string_view ntupleName, const RNTupleWriteOptions &options)
void CommitClusterGroup() final
Write out the page locations (page list envelope) for all the committed clusters since the last call ...
void UpdateSchema(const RNTupleModelChangeset &changeset, NTupleSize_t firstEntry) final
Incorporate incremental changes to the model into the ntuple descriptor.
void CommitSealedPage(DescriptorId_t physicalColumnId, const RPageStorage::RSealedPage &sealedPage) final
Write a preprocessed page to storage. The column must have been added before.
void CommitSealedPageV(std::span< RPageStorage::RSealedPageGroup > ranges) final
Write a vector of preprocessed pages to storage. The corresponding columns must have been added befor...
void CommitSuppressedColumn(ColumnHandle_t columnHandle) final
Commits a suppressed column for the current cluster.
void CommitStagedClusters(std::span< RStagedCluster > clusters) final
Commit staged clusters, logically appending them to the ntuple descriptor.
void EnableDefaultMetrics(const std::string &prefix)
Enables the default set of metrics provided by RPageSink.
void UpdateExtraTypeInfo(const RExtraTypeInfoDescriptor &extraTypeInfo) final
Adds an extra type information record to schema.
void CommitDatasetImpl() final
Abstract interface to write data into an ntuple.
RPageSink(std::string_view ntupleName, const RNTupleWriteOptions &options)
void CommitDataset()
Run the registered callbacks and finalize the current cluster and the entire data set.
virtual RPage ReservePage(ColumnHandle_t columnHandle, std::size_t nElements)
Get a new, empty page for the given column that can be filled with up to nElements; nElements must be...
RSealedPage SealPage(const RPage &page, const RColumnElementBase &element)
Helper for streaming a page.
RCluster::ColumnSet_t ToColumnSet() const
void Erase(DescriptorId_t physicalColumnID)
void Insert(DescriptorId_t physicalColumnID)
ColumnHandle_t AddColumn(DescriptorId_t fieldId, RColumn &column) override
Register a new column.
void LoadStructure()
Loads header and footer without decompressing or deserializing them.
RPageSource(std::string_view ntupleName, const RNTupleReadOptions &fOptions)
void PrepareLoadCluster(const RCluster::RKey &clusterKey, ROnDiskPageMap &pageZeroMap, std::function< void(DescriptorId_t, NTupleSize_t, const RClusterDescriptor::RPageRange::RPageInfo &)> perPageFunc)
Prepare a page range read for the column set in clusterKey.
std::unique_ptr< RPageSource > Clone() const
Open the same storage multiple times, e.g.
void EnableDefaultMetrics(const std::string &prefix)
Enables the default set of metrics provided by RPageSource.
void DropColumn(ColumnHandle_t columnHandle) override
Unregisters a column.
static std::unique_ptr< RPageSource > Create(std::string_view ntupleName, std::string_view location, const RNTupleReadOptions &options=RNTupleReadOptions())
Guess the concrete derived page source from the file name (location)
NTupleSize_t GetNElements(ColumnHandle_t columnHandle)
void UnzipCluster(RCluster *cluster)
Parallel decompression and unpacking of the pages in the given cluster.
virtual RPageRef LoadPage(ColumnHandle_t columnHandle, NTupleSize_t globalIndex)
Allocates and fills a page that contains the index-th element.
void SetEntryRange(const REntryRange &range)
Promise to only read from the given entry range.
virtual void UnzipClusterImpl(RCluster *cluster)
void Attach()
Open the physical storage container and deserialize header and footer.
static RResult< RPage > UnsealPage(const RSealedPage &sealedPage, const RColumnElementBase &element, DescriptorId_t physicalColumnId, RPageAllocator &pageAlloc)
Helper for unstreaming a page.
NTupleSize_t GetNEntries()
Common functionality of an ntuple storage for both reading and writing.
RPageStorage(std::string_view name)
Stores information about the cluster in which this page resides.
A page is a slice of a column that is mapped into memory.
std::size_t GetNBytes() const
The space taken by column elements in the buffer.
static RPage MakePageZero(ColumnId_t columnId, ClusterSize_t::ValueType elementSize)
Make a 'zero' page for column columnId (that is comprised of 0x00 bytes only).
std::uint32_t GetNElements() const
static const void * GetPageZeroBuffer()
Return a pointer to the page zero buffer used if there is no on-disk data for a particular deferred c...
const RFieldBase * GetSourceField(const RFieldBase *target) const
bool TryUpdate(RColumn &column, std::size_t newWritePageSize)
Try to register the new write page size for the given column.
bool TryEvict(std::size_t targetAvailableSize, std::size_t pageSizeLimit)
Flush columns in order of allocated write page size until the sum of all write page allocations leave...
Meta-data for a set of ntuple clusters.
NTupleSize_t GetFirstEntryIndex() const
ClusterSize_t GetNEntries() const
Addresses a column element or field item relative to a particular cluster, instead of a global NTuple...
DescriptorId_t GetClusterId() const
ClusterSize_t::ValueType GetIndex() const
Base class for all ROOT issued exceptions.
A field translates read and write calls from/to underlying columns to/from tree values.
DescriptorId_t GetOnDiskId() const
The on-storage meta-data of an ntuple.
std::unique_ptr< RNTupleModel > CreateModel(const RCreateModelOptions &options=RCreateModelOptions()) const
Re-create the C++ model from the stored meta-data.
DescriptorId_t FindNextClusterId(DescriptorId_t clusterId) const
DescriptorId_t FindClusterId(DescriptorId_t physicalColumnId, NTupleSize_t index) const
const RClusterDescriptor & GetClusterDescriptor(DescriptorId_t clusterId) const
The RNTupleModel encapsulates the schema of an ntuple.
const std::string & GetDescription() const
Common user-tunable settings for reading ntuples.
Common user-tunable settings for storing ntuples.
void ThrowOnError()
Short-hand method to throw an exception in the case of errors.
The class is used as a return type for operations that can fail; wraps a value of type T or an RError...
RProjectedFields & GetProjectedFieldsOfModel(RNTupleModel &model)
void CallConnectPageSinkOnField(RFieldBase &, RPageSink &, NTupleSize_t firstEntry=0)
RFieldZero & GetFieldZeroOfModel(RNTupleModel &model)
std::uint64_t NTupleSize_t
Integer type long enough to hold the maximum number of entries in a column.
std::uint64_t DescriptorId_t
Distinguishes elements of the same type within a descriptor, e.g. different fields.
constexpr NTupleSize_t kInvalidNTupleIndex
constexpr DescriptorId_t kInvalidDescriptorId
The identifiers that specifies the content of a (partial) cluster.
DescriptorId_t fClusterId
ColumnSet_t fPhysicalColumnSet
The incremental changes to a RNTupleModel
std::vector< RFieldBase * > fAddedProjectedFields
Points to the projected fields in fModel that were added as part of an updater transaction.
std::vector< RFieldBase * > fAddedFields
Points to the fields in fModel that were added as part of an updater transaction.
On-disk pages within a page source are identified by the column and page number.
Default I/O performance counters that get registered in fMetrics.
Parameters for the SealPage() method.
const RColumnElementBase * fElement
Corresponds to the page's elements, for size calculation etc.
void * fBuffer
Location for sealed output. The memory buffer has to be large enough.
bool fAllowAlias
If false, the output buffer must not point to the input page buffer, which would otherwise be an opti...
int fCompressionSetting
Compression algorithm and level to apply.
bool fWriteChecksum
Adds a 8 byte little-endian xxhash3 checksum to the page payload.
const RPage * fPage
Input page to be sealed.
Cluster that was staged, but not yet logically appended to the RNTuple.
std::uint64_t fNBytesWritten
std::vector< RColumnInfo > fColumnInfos
Summarizes cluster-level information that are necessary to load a certain page.
DescriptorId_t fClusterId
std::uint64_t fColumnOffset
The first element number of the page's column in the given cluster.
RClusterDescriptor::RPageRange::RPageInfoExtended fPageInfo
Location of the page on disk.
Default I/O performance counters that get registered in fMetrics
Used in SetEntryRange / GetEntryRange.
bool IntersectsWith(const RClusterDescriptor &clusterDesc) const
Returns true if the given cluster has entries within the entry range.
DescriptorId_t fPhysicalId
A sealed page contains the bytes of a page as written to storage (packed & compressed).
const void * GetBuffer() const
void SetHasChecksum(bool hasChecksum)
void SetNElements(std::uint32_t nElements)
void SetBuffer(const void *buffer)
void SetBufferSize(std::size_t bufferSize)
RResult< void > VerifyChecksumIfEnabled() const
RResult< std::uint64_t > GetChecksum() const
Returns a failure if the sealed page has no checksum.
std::uint32_t GetNElements() const
std::size_t GetBufferSize() const
bool GetHasChecksum() const
std::size_t GetDataSize() const
std::size_t fCurrentPageSize
bool operator>(const RColumnInfo &other) const
The window of element indexes of a particular column in a particular cluster.
NTupleSize_t fFirstElementIndex
The global index of the first column element in the cluster.
int fCompressionSettings
The usual format for ROOT compression settings (see Compression.h).
ClusterSize_t fNElements
The number of column elements in the cluster.
DescriptorId_t fPhysicalColumnId
ELocatorType fType
For non-disk locators, the value for the Type field.