RClusterPool implementation (excerpts):

RClusterPool::RClusterPool(RPageSource &pageSource, unsigned int clusterBunchSize)
   : fPageSource(pageSource), fClusterBunchSize(clusterBunchSize),
     fPool(2 * clusterBunchSize) /* the pool caches up to twice the cluster bunch size; ... */
{
}

RClusterPool::~RClusterPool()
{
   // Wake up the worker threads so that they can terminate
   { std::unique_lock<std::mutex> lock(fLockWorkQueue);  /* ... */ fCvHasReadWork.notify_one();  }
   { std::unique_lock<std::mutex> lock(fLockUnzipQueue); /* ... */ fCvHasUnzipWork.notify_one(); }
   /* ... */
}
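The destructor relies on a common shutdown idiom for its two worker threads: push a poison pill into the corresponding queue under the queue lock, notify the condition variable, and join the thread. Below is a minimal, self-contained sketch of that idiom; the job type, queue, and names are illustrative and not part of RClusterPool.

#include <condition_variable>
#include <iostream>
#include <mutex>
#include <optional>
#include <queue>
#include <thread>

int main()
{
   std::mutex lock;
   std::condition_variable cvHasWork;
   std::queue<std::optional<int>> queue;   // an empty optional acts as the poison pill

   std::thread worker([&] {
      while (true) {
         std::optional<int> job;
         {
            std::unique_lock<std::mutex> guard(lock);
            cvHasWork.wait(guard, [&] { return !queue.empty(); });
            job = std::move(queue.front());
            queue.pop();
         }
         if (!job)
            return;                        // poison pill: terminate the worker thread
         std::cout << "processing job " << *job << std::endl;
      }
   });

   {
      std::lock_guard<std::mutex> guard(lock);
      queue.push(42);                      // a regular work item
      queue.push(std::nullopt);            // the poison pill
   }
   cvHasWork.notify_one();
   worker.join();
   return 0;
}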
void RClusterPool::ExecUnzipClusters()
{
   while (true) {
      std::vector<RUnzipItem> unzipItems;
      {
         std::unique_lock<std::mutex> lock(fLockUnzipQueue);
         fCvHasUnzipWork.wait(lock, [&]{ return !fUnzipQueue.empty(); });
         while (!fUnzipQueue.empty()) {
            unzipItems.emplace_back(std::move(fUnzipQueue.front()));
            fUnzipQueue.pop();
         }
      }
      for (auto &item : unzipItems) {
         // ... (an empty item acts as poison pill and terminates the thread)
         fPageSource.UnzipCluster(item.fCluster.get());
         // Hand the now uncompressed cluster to the waiting consumer
         item.fPromise.set_value(std::move(item.fCluster));
      }
   }
}
void RClusterPool::ExecReadClusters()
{
   while (true) {
      std::vector<RReadItem> readItems;
      std::vector<RCluster::RKey> clusterKeys;
      std::int64_t bunchId = -1;
      {
         std::unique_lock<std::mutex> lock(fLockWorkQueue);
         fCvHasReadWork.wait(lock, [&]{ return !fReadQueue.empty(); });
         while (!fReadQueue.empty()) {
            // ... (an item with an invalid cluster id terminates the thread)
            // Pop items from the read queue until a new bunch id starts
            if ((bunchId >= 0) && (fReadQueue.front().fBunchId != bunchId))
               break;
            readItems.emplace_back(std::move(fReadQueue.front()));
            fReadQueue.pop();
            bunchId = readItems.back().fBunchId;
            clusterKeys.emplace_back(readItems.back().fClusterKey);
         }
      }

      auto clusters = fPageSource.LoadClusters(clusterKeys);

      for (std::size_t i = 0; i < clusters.size(); ++i) {
         // The cluster might have expired while it was being loaded
         bool discard = false;
         {
            std::unique_lock<std::mutex> lock(fLockWorkQueue);
            for (auto &inFlight : fInFlightClusters) {
               if (inFlight.fClusterKey.fClusterId != clusters[i]->GetId())
                  continue;
               discard = inFlight.fIsExpired;
               break;
            }
         }
         if (discard) {
            clusters[i].reset();
            readItems[i].fPromise.set_value(std::move(clusters[i]));
         } else {
            // Hand over the loaded cluster to the unzip thread
            std::unique_lock<std::mutex> lock(fLockUnzipQueue);
            fUnzipQueue.emplace(RUnzipItem{std::move(clusters[i]), std::move(readItems[i].fPromise)});
            fCvHasUnzipWork.notify_one();
         }
      }
   }
}
RCluster *RClusterPool::FindInPool(DescriptorId_t clusterId) const
{
   for (const auto &cptr : fPool) {
      if (cptr && (cptr->GetId() == clusterId))
         return cptr.get();
   }
   return nullptr;
}

size_t RClusterPool::FindFreeSlot() const
{
   auto N = fPool.size();
   for (unsigned i = 0; i < N; ++i)
      if (!fPool[i])
         return i;
   // ... (callers ensure that a free slot exists)
}
// Helper class, local to this translation unit, collecting the (cluster id, column set)
// pairs that should be made available by the background threads
class RProvides {
public:
   struct RInfo {
      std::int64_t fBunchId = -1;
      std::int64_t fFlags = 0;
      ColumnSet_t fColumnSet;
   };

   static constexpr std::int64_t kFlagRequired = 0x01;
   static constexpr std::int64_t kFlagLast = 0x02;

private:
   std::map<DescriptorId_t, RInfo> fMap;

public:
   void Insert(DescriptorId_t clusterId, const RInfo &info)
   {
      fMap.emplace(clusterId, info);
   }

   bool Contains(DescriptorId_t clusterId) { return fMap.count(clusterId) > 0; }

   std::size_t GetSize() const { return fMap.size(); }

   // Remove the given columns from the column set scheduled for clusterId
   void Erase(DescriptorId_t clusterId, const ColumnSet_t &columns)
   {
      auto itr = fMap.find(clusterId);
      if (itr == fMap.end())
         return;
      ColumnSet_t d;
      std::copy_if(itr->second.fColumnSet.begin(), itr->second.fColumnSet.end(), std::inserter(d, d.end()),
                   [&columns] (DescriptorId_t needle) { return columns.count(needle) == 0; });
      if (d.empty()) {
         fMap.erase(itr);
      } else {
         itr->second.fColumnSet = d;
      }
   }

   decltype(fMap)::iterator begin() { return fMap.begin(); }
   decltype(fMap)::iterator end() { return fMap.end(); }
};
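As a usage illustration of the helper class above: the cluster and column ids below are made up; in GetCluster() the column sets come from the caller and from the clusters already cached in the pool.

RProvides provide;

RProvides::RInfo info;
info.fBunchId = 0;
info.fFlags = RProvides::kFlagRequired;
info.fColumnSet = {10, 11, 12};
provide.Insert(/*clusterId=*/0, info);    // cluster 0 must be loaded with columns 10, 11, 12

info.fFlags = 0;
provide.Insert(/*clusterId=*/1, info);    // cluster 1 is optional look-ahead

// Columns that are already available do not need to be scheduled again:
provide.Erase(0, {10, 11});               // cluster 0 now only needs column 12
provide.Erase(1, {10, 11, 12});           // cluster 1 is removed entirely

// At this point: provide.GetSize() == 1 and provide.Contains(1) == false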
RCluster *RClusterPool::GetCluster(DescriptorId_t clusterId, const RCluster::ColumnSet_t &columns)
{
   const auto &desc = fPageSource.GetDescriptor();

   // Determine previous cluster ids that we keep if they happen to be in the pool
   std::set<DescriptorId_t> keep;
   auto prev = clusterId;
   for (unsigned int i = 0; i < fWindowPre; ++i) {
      prev = desc.FindPrevClusterId(prev);
      if (prev == kInvalidDescriptorId)
         break;
      keep.insert(prev);
   }

   // Determine following cluster ids and the columns that should be made available
   RProvides provide;
   RProvides::RInfo provideInfo;
   provideInfo.fColumnSet = columns;
   provideInfo.fBunchId = fBunchId;
   provideInfo.fFlags = RProvides::kFlagRequired;
   for (DescriptorId_t i = 0, next = clusterId; i < 2 * fClusterBunchSize; ++i) {
      if (i == fClusterBunchSize)
         provideInfo.fBunchId = ++fBunchId;   // the second half of the window forms the next bunch
      auto cid = next;
      next = desc.FindNextClusterId(cid);
      if (next == kInvalidDescriptorId)
         provideInfo.fFlags |= RProvides::kFlagLast;
      provide.Insert(cid, provideInfo);
      if (next == kInvalidDescriptorId)
         break;
      provideInfo.fFlags = 0;
   }

   // Clear the cache from clusters that are neither in the look-ahead nor in the look-back window
   for (auto &cptr : fPool) {
      if (!cptr)
         continue;
      if (provide.Contains(cptr->GetId()) > 0)
         continue;
      if (keep.count(cptr->GetId()) > 0)
         continue;
      cptr.reset();
   }

   // Move clusters that meanwhile arrived into the cache pool
   {
      std::lock_guard<std::mutex> lockGuard(fLockWorkQueue);

      for (auto itr = fInFlightClusters.begin(); itr != fInFlightClusters.end(); ) {
         itr->fIsExpired =
            !provide.Contains(itr->fClusterKey.fClusterId) && (keep.count(itr->fClusterKey.fClusterId) == 0);
         if (itr->fFuture.wait_for(std::chrono::seconds(0)) != std::future_status::ready) {
            // Remove the columns that are already scheduled for loading
            provide.Erase(itr->fClusterKey.fClusterId, itr->fClusterKey.fColumnSet);
            ++itr;
            continue;
         }

         auto cptr = itr->fFuture.get();
         if (!cptr || itr->fIsExpired) {
            itr = fInFlightClusters.erase(itr);
            continue;
         }

         // Either put the fresh cluster into a free slot or merge it with an existing one
         auto existingCluster = FindInPool(cptr->GetId());
         if (existingCluster) {
            existingCluster->Adopt(std::move(*cptr));
         } else {
            auto idxFreeSlot = FindFreeSlot();
            fPool[idxFreeSlot] = std::move(cptr);
         }
         itr = fInFlightClusters.erase(itr);
      }

      // Columns already cached in the pool do not need to be scheduled again
      for (auto &cptr : fPool) {
         if (!cptr)
            continue;
         provide.Erase(cptr->GetId(), cptr->GetAvailColumns());
      }

      // Figure out if enough work accumulated to justify I/O calls
      bool skipPrefetch = false;
      if (provide.GetSize() < fClusterBunchSize) {
         skipPrefetch = true;
         for (const auto &kv : provide) {
            if ((kv.second.fFlags & (RProvides::kFlagRequired | RProvides::kFlagLast)) == 0)
               continue;
            skipPrefetch = false;
            break;
         }
      }

      // Update the work queue and the in-flight cluster list with the new requests
      if (!skipPrefetch) {
         for (const auto &kv : provide) {
            R__ASSERT(!kv.second.fColumnSet.empty());
            RReadItem readItem;
            readItem.fClusterKey.fClusterId = kv.first;
            readItem.fBunchId = kv.second.fBunchId;
            readItem.fClusterKey.fColumnSet = kv.second.fColumnSet;
            RInFlightCluster inFlightCluster;
            inFlightCluster.fClusterKey = readItem.fClusterKey;
            inFlightCluster.fFuture = readItem.fPromise.get_future();
            fInFlightClusters.emplace_back(std::move(inFlightCluster));
            fReadQueue.emplace(std::move(readItem));
         }
         if (fReadQueue.size() > 0)
            fCvHasReadWork.notify_one();
      }
   }

   return WaitFor(clusterId, columns);
}
RCluster *RClusterPool::WaitFor(DescriptorId_t clusterId, const RCluster::ColumnSet_t &columns)
{
   while (true) {
      // Fast exit: the cluster happens to be already present in the cache pool
      auto result = FindInPool(clusterId);
      if (result) {
         bool hasMissingColumn = false;
         for (auto cid : columns) {
            if (result->ContainsColumn(cid))
               continue;
            hasMissingColumn = true;
            break;
         }
         if (!hasMissingColumn)
            return result;
      }

      // Otherwise the missing data must have been scheduled for loading by now, so block and wait
      decltype(fInFlightClusters)::iterator itr;
      {
         std::lock_guard<std::mutex> lockGuardInFlightClusters(fLockWorkQueue);
         itr = fInFlightClusters.begin();
         for (; itr != fInFlightClusters.end(); ++itr) {
            if (itr->fClusterKey.fClusterId == clusterId)
               break;
         }
         R__ASSERT(itr != fInFlightClusters.end());
      }

      auto cptr = itr->fFuture.get();
      if (result) {
         result->Adopt(std::move(*cptr));
      } else {
         auto idxFreeSlot = FindFreeSlot();
         fPool[idxFreeSlot] = std::move(cptr);
      }

      std::lock_guard<std::mutex> lockGuardInFlightClusters(fLockWorkQueue);
      fInFlightClusters.erase(itr);
   }
}
void RClusterPool::WaitForInFlightClusters()
{
   while (true) {
      decltype(fInFlightClusters)::iterator itr;
      {
         std::lock_guard<std::mutex> lockGuardInFlightClusters(fLockWorkQueue);
         itr = fInFlightClusters.begin();
         if (itr == fInFlightClusters.end())
            return;
      }
      itr->fFuture.wait();   // block until the first in-flight cluster is ready
      std::lock_guard<std::mutex> lockGuardInFlightClusters(fLockWorkQueue);
      fInFlightClusters.erase(itr);
   }
}
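GetCluster() keeps, for every scheduled read, the std::future of the promise stored in the RReadItem; the I/O and unzip threads eventually fulfill that promise, and WaitFor() either polls it (wait_for with a zero timeout) or blocks on it. The following is a minimal, self-contained sketch of this promise/future handoff between two threads; the Cluster stand-in is illustrative and not the ROOT RCluster.

#include <chrono>
#include <future>
#include <iostream>
#include <memory>
#include <string>
#include <thread>

struct Cluster { std::string fData; };   // hypothetical stand-in for a loaded cluster

int main()
{
   std::promise<std::unique_ptr<Cluster>> promise;
   std::future<std::unique_ptr<Cluster>> future = promise.get_future();   // kept by the requester

   // The "I/O thread" loads the data and fulfills the promise
   std::thread io([p = std::move(promise)]() mutable {
      auto cluster = std::make_unique<Cluster>();
      cluster->fData = "compressed pages";
      p.set_value(std::move(cluster));
   });

   // The requester polls first (as GetCluster() does) and then blocks (as WaitFor() does)
   if (future.wait_for(std::chrono::seconds(0)) != std::future_status::ready)
      std::cout << "not ready yet, blocking on the future" << std::endl;
   auto cluster = future.get();
   std::cout << "received cluster with " << cluster->fData << std::endl;

   io.join();
   return 0;
}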
Manages a set of clusters containing compressed and packed pages.
RCluster * FindInPool(DescriptorId_t clusterId) const
Every cluster id has at most one corresponding RCluster pointer in the pool.
std::vector< std::unique_ptr< RCluster > > fPool
The cache of clusters around the currently active cluster.
void ExecUnzipClusters()
The unzip thread routine, which takes a loaded cluster and passes it to fPageSource.UnzipCluster().
size_t FindFreeSlot() const
Returns an index of an unused element in fPool; callers of this function (GetCluster() and WaitFor()) ensure that a free slot exists.
void WaitForInFlightClusters()
Used by the unit tests to drain the queue of clusters to be preloaded.
RClusterPool(RPageSource &pageSource, unsigned int clusterBunchSize)
void ExecReadClusters()
The I/O thread routine; there is exactly one I/O thread in flight for every cluster pool.
std::thread fThreadUnzip
The unzip thread takes a loaded cluster and calls fPageSource->UnzipCluster() on it.
RCluster * GetCluster(DescriptorId_t clusterId, const RCluster::ColumnSet_t &columns)
Returns the requested cluster either from the pool or, in case of a cache miss, lets the I/O thread load it in the background; a usage sketch follows at the end of this page.
RPageSource & fPageSource
Every cluster pool is responsible for exactly one page source that triggers loading of the clusters.
std::thread fThreadIo
The I/O thread calls RPageSource::LoadClusters() asynchronously.
unsigned int fClusterBunchSize
The number of clusters that are being read in a single vector read.
RCluster * WaitFor(DescriptorId_t clusterId, const RCluster::ColumnSet_t &columns)
Returns the given cluster from the pool, which needs to contain at least the requested columns.
An in-memory subset of the packed and compressed pages of a cluster.
std::unordered_set< DescriptorId_t > ColumnSet_t
Abstract interface to read data from an ntuple.
std::uint64_t DescriptorId_t
Distinguishes elements of the same type within a descriptor, e.g. different fields.
constexpr DescriptorId_t kInvalidDescriptorId
void Erase(const T &that, std::vector< T > &v)
Erases the element that from the vector v.
Clusters that are currently being processed by the pipeline.
bool operator<(const RInFlightCluster &other) const
First order by cluster id, then by number of columns, then by the column ids in fColumns.
RCluster::RKey fClusterKey
std::future< std::unique_ptr< RCluster > > fFuture
Request to load a subset of the columns of a particular cluster.
std::int64_t fBunchId
Items with different bunch ids are scheduled for different vector reads.
std::promise< std::unique_ptr< RCluster > > fPromise
RCluster::RKey fClusterKey
Request to decompress and, if necessary, unpack compressed pages.
DescriptorId_t fClusterId
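Putting the pieces of this page together, a hypothetical caller could use the pool as sketched below. Headers, namespaces, and the setup of the page source and of the physical column ids are omitted and assumed to be handled by the surrounding page storage layer; only the constructor, GetCluster(), and WaitForInFlightClusters() calls are taken from this page.

void PrefetchAndUse(RPageSource &pageSource, DescriptorId_t firstClusterId,
                    const RCluster::ColumnSet_t &columns)
{
   // Clusters are read in bunches of four; the pool caches up to 2 * 4 clusters
   RClusterPool pool(pageSource, /*clusterBunchSize=*/4);

   // Returns the cluster from the pool or, on a cache miss, blocks until the I/O thread
   // has loaded it; the following clusters are scheduled for background loading
   RCluster *cluster = pool.GetCluster(firstClusterId, columns);
   R__ASSERT(cluster && (cluster->GetId() == firstClusterId));

   // Mostly useful for tests: drain all in-flight background reads before tearing down
   pool.WaitForInFlightClusters();
}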