@@ -123,6 +123,7 @@ ShmSegmentOpts CacheAllocator<CacheTrait>::createShmCacheOpts(TierId tid) {
 
 template <typename CacheTrait>
 size_t CacheAllocator<CacheTrait>::memoryTierSize(TierId tid) const {
+  auto& memoryTierConfigs = config_.memoryTierConfigs;
   auto partitions = std::accumulate(memoryTierConfigs.begin(), memoryTierConfigs.end(), 0UL,
                                     [](const size_t i, const MemoryTierCacheConfig& config){
                                       return i + config.getRatio();
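The alias added above feeds a ratio sum: each configured tier contributes getRatio() to the total, and the rest of memoryTierSize (not shown in this hunk) divides the overall cache size by that sum. A self-contained sketch of the idea; TierConfig and totalCacheSize are illustrative stand-ins, not the real config API:

    #include <cstddef>
    #include <iostream>
    #include <numeric>
    #include <vector>

    struct TierConfig {
      size_t ratio; // stand-in for MemoryTierCacheConfig::getRatio()
    };

    int main() {
      std::vector<TierConfig> tiers{{1}, {3}}; // e.g. DRAM : PMEM = 1 : 3
      size_t totalCacheSize = 4ULL * 1024 * 1024 * 1024; // assumed overall cache size

      // same accumulation shape as in the diff: sum of per-tier ratios
      size_t partitions = std::accumulate(
          tiers.begin(), tiers.end(), 0UL,
          [](size_t sum, const TierConfig& c) { return sum + c.ratio; });

      for (size_t tid = 0; tid < tiers.size(); ++tid) {
        // each tier gets a share proportional to its ratio
        std::cout << "tier " << tid << ": "
                  << totalCacheSize / partitions * tiers[tid].ratio << " bytes\n";
      }
      return 0;
    }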
@@ -1231,7 +1232,7 @@ CacheAllocator<CacheTrait>::insertOrReplace(const WriteHandle& handle) {
  * Concurrent threads which are getting handle to the same key:
  * 1. When a handle is created it checks if the moving flag is set
  * 2. If so, Handle implementation creates waitContext and adds it to the
- *    MoveCtx by calling handleWithWaitContextForMovingItem() method.
+ *    MoveCtx by calling tryGetHandleWithWaitContextForMovingItem() method.
  * 3. Wait until the moving thread will complete its job.
  */
 template <typename CacheTrait>
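The comment block above describes a wait-on-move protocol: a reader that finds the moving flag set parks a wait context on the per-key MoveCtx and is woken once the mover finishes. A standalone illustration of that shape, using std::promise/std::future in place of CacheLib's MoveCtx and WaitContext types:

    #include <future>
    #include <string>
    #include <thread>

    int main() {
      // the promise plays the role of MoveCtx; the future is the reader's wait context
      std::promise<std::string> moveCtx;
      std::future<std::string> waitContext = moveCtx.get_future();

      std::thread mover([&moveCtx] {
        // ... copy the item to its new location ...
        moveCtx.set_value("handle-to-new-location"); // analogous to wakeUpWaiters()
      });

      // reader: blocks until the moving thread completes its job (step 3 above)
      std::string handle = waitContext.get();

      mover.join();
      return handle.empty() ? 1 : 0;
    }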
@@ -1399,9 +1400,10 @@ CacheAllocator<CacheTrait>::getNextCandidate(TierId tid,
   Item* toRecycle = nullptr;
   Item* candidate = nullptr;
   auto& mmContainer = getMMContainer(tid, pid, cid);
+  bool lastTier = tid+1 >= getNumTiers();
 
   mmContainer.withEvictionIterator([this, pid, cid, &candidate, &toRecycle,
-                                    &searchTries, &mmContainer,
+                                    &searchTries, &mmContainer, &lastTier,
                                     &token](auto&& itr) {
     if (!itr) {
       ++searchTries;
@@ -1421,16 +1423,21 @@ CacheAllocator<CacheTrait>::getNextCandidate(TierId tid,
               ? &toRecycle_->asChainedItem().getParentItem(compressor_)
               : toRecycle_;
 
-      auto putToken = createPutToken(*candidate_);
+      typename NvmCacheT::PutToken putToken;
+      if (lastTier) {
+        // if it's last tier, the item will be evicted
+        // need to create put token before marking it exclusive
+        putToken = createPutToken(*candidate_);
+      }
 
-      if (shouldWriteToNvmCache(*candidate_) && !putToken.isValid()) {
+      if (lastTier && shouldWriteToNvmCache(*candidate_) && !putToken.isValid()) {
        stats_.evictFailConcurrentFill.inc();
        ++itr;
        continue;
      }
 
-      auto markedForEviction = candidate_->markForEviction();
-      if (!markedForEviction) {
+      auto marked = lastTier ? candidate_->markForEviction() : candidate_->markMoving();
+      if (!marked) {
        if (candidate_->hasChainedItem()) {
          stats_.evictFailParentAC.inc();
        } else {
@@ -1440,8 +1447,10 @@ CacheAllocator<CacheTrait>::getNextCandidate(TierId tid,
        continue;
      }
 
+      XDCHECK(candidate_->isMoving() || candidate_->isMarkedForEviction());
      // markForEviction to make sure no other thead is evicting the item
-      // nor holding a handle to that item
+      // nor holding a handle to that item if this is last tier
+      // since we won't be moving the item to the next tier
      toRecycle = toRecycle_;
      candidate = candidate_;
      token = std::move(putToken);
@@ -1464,13 +1473,44 @@ CacheAllocator<CacheTrait>::getNextCandidate(TierId tid,
 
   XDCHECK(toRecycle);
   XDCHECK(candidate);
-  XDCHECK(candidate->isMarkedForEviction());
+  XDCHECK(candidate->isMoving() || candidate->isMarkedForEviction());
 
-  unlinkItemForEviction(*candidate);
+  auto evictedToNext = lastTier ? nullptr
+                                : tryEvictToNextMemoryTier(*candidate, false);
+  if (!evictedToNext) {
+    if (!token.isValid()) {
+      token = createPutToken(*candidate);
+    }
+    // tryEvictToNextMemoryTier should only fail if allocation of the new item fails
+    // in that case, it should still be possible to mark item as exclusive.
+    //
+    // in case that we are on the last tier, we would have already marked
+    // as exclusive since we will not be moving the item to the next tier
+    // but rather just evicting all together, no need to
+    // markExclusiveWhenMoving
+    auto ret = lastTier ? true : candidate->markForEvictionWhenMoving();
+    XDCHECK(ret);
+
+    unlinkItemForEviction(*candidate);
+    // wake up any readers that wait for the move to complete
+    // it's safe to do now, as we have the item marked exclusive and
+    // no other reader can be added to the waiters list
+    wakeUpWaiters(candidate->getKey(), {});
+
+    if (token.isValid() && shouldWriteToNvmCacheExclusive(*candidate)) {
+      nvmCache_->put(*candidate, std::move(token));
+    }
+  } else {
+    XDCHECK(!evictedToNext->isMarkedForEviction() && !evictedToNext->isMoving());
+    XDCHECK(!candidate->isMarkedForEviction() && !candidate->isMoving());
+    XDCHECK(!candidate->isAccessible());
+    XDCHECK(candidate->getKey() == evictedToNext->getKey());
 
-  if (token.isValid() && shouldWriteToNvmCacheExclusive(*candidate)) {
-    nvmCache_->put(*candidate, std::move(token));
+    wakeUpWaiters(candidate->getKey(), std::move(evictedToNext));
   }
+
+  XDCHECK(!candidate->isMarkedForEviction() && !candidate->isMoving());
+
   return {candidate, toRecycle};
 }
 
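The branch structure added above hinges on a single predicate: candidates on the last memory tier are truly evicted (put token, NVM admission, unlink), while candidates on upper tiers are marked moving and copied to the next tier, with waiters handed the new handle. A minimal standalone model of that gating rule; EvictAction and pickAction are illustrative names, not CacheLib API:

    #include <cassert>

    enum class EvictAction { MarkForEviction, MarkMoving };

    // same predicate as `bool lastTier = tid+1 >= getNumTiers();` in the diff
    EvictAction pickAction(int tid, int numTiers) {
      const bool lastTier = tid + 1 >= numTiers;
      return lastTier ? EvictAction::MarkForEviction : EvictAction::MarkMoving;
    }

    int main() {
      assert(pickAction(0, 2) == EvictAction::MarkMoving);      // upper tier: move down
      assert(pickAction(1, 2) == EvictAction::MarkForEviction); // last tier: real eviction
      assert(pickAction(0, 1) == EvictAction::MarkForEviction); // single-tier cache
      return 0;
    }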
@@ -1563,6 +1603,54 @@ bool CacheAllocator<CacheTrait>::shouldWriteToNvmCacheExclusive(
   return true;
 }
 
+template <typename CacheTrait>
+typename CacheAllocator<CacheTrait>::WriteHandle
+CacheAllocator<CacheTrait>::tryEvictToNextMemoryTier(
+    TierId tid, PoolId pid, Item& item, bool fromBgThread) {
+  XDCHECK(item.isMoving());
+  XDCHECK(item.getRefCount() == 0);
+  if (item.hasChainedItem()) return WriteHandle{}; // TODO: We do not support ChainedItem yet
+  if (item.isExpired()) {
+    accessContainer_->remove(item);
+    item.unmarkMoving();
+    return acquire(&item);
+  }
+
+  TierId nextTier = tid; // TODO - calculate this based on some admission policy
+  while (++nextTier < getNumTiers()) { // try to evict down to the next memory tiers
+    // allocateInternal might trigger another eviction
+    auto newItemHdl = allocateInternalTier(nextTier, pid,
+                                           item.getKey(),
+                                           item.getSize(),
+                                           item.getCreationTime(),
+                                           item.getExpiryTime(),
+                                           fromBgThread);
+
+    if (newItemHdl) {
+
+      bool moveSuccess = moveRegularItem(item, newItemHdl);
+      if (!moveSuccess) {
+        return WriteHandle{};
+      }
+      XDCHECK_EQ(newItemHdl->getSize(), item.getSize());
+      item.unmarkMoving();
+      return newItemHdl;
+    } else {
+      return WriteHandle{};
+    }
+  }
+
+  return {};
+}
+
+template <typename CacheTrait>
+typename CacheAllocator<CacheTrait>::WriteHandle
+CacheAllocator<CacheTrait>::tryEvictToNextMemoryTier(Item& item, bool fromBgThread) {
+  auto tid = getTierId(item);
+  auto pid = allocator_[tid]->getAllocInfo(item.getMemory()).poolId;
+  return tryEvictToNextMemoryTier(tid, pid, item, fromBgThread);
+}
+
 template <typename CacheTrait>
 typename CacheAllocator<CacheTrait>::RemoveRes
 CacheAllocator<CacheTrait>::remove(typename Item::Key key) {
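tryEvictToNextMemoryTier above walks tiers colder than the item's own, but gives up on the first allocation failure rather than continuing (the else branch returns an empty handle). A standalone model of that traversal; tierHasSpace stands in for whether allocateInternalTier would succeed in a given tier:

    #include <optional>
    #include <vector>

    // returns the tier the item landed in, or nullopt if it must be evicted outright
    std::optional<int> evictDownward(int tid, const std::vector<bool>& tierHasSpace) {
      const int numTiers = static_cast<int>(tierHasSpace.size());
      int nextTier = tid;
      while (++nextTier < numTiers) { // try the next colder tier
        if (tierHasSpace[nextTier]) {
          return nextTier;   // item moved down successfully
        }
        return std::nullopt; // first failure ends the attempt, as in the diff
      }
      return std::nullopt;   // already on the last tier
    }

    int main() {
      bool ok = evictDownward(0, {true, true}) == std::optional<int>{1} &&
                evictDownward(1, {true, true}) == std::nullopt;
      return ok ? 0 : 1;
    }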
@@ -2088,8 +2176,7 @@ std::vector<std::string> CacheAllocator<CacheTrait>::dumpEvictionIterator(
 
   std::vector<std::string> content;
 
-  size_t i = 0;
-  while (i < numItems && tid >= 0) {
+  while (tid >= 0) {
     auto& mm = *mmContainers_[tid][pid][cid];
     mm.withEvictionIterator([&content, numItems](auto&& itr) {
       while (itr && content.size() < numItems) {
@@ -2099,7 +2186,6 @@ std::vector<std::string> CacheAllocator<CacheTrait>::dumpEvictionIterator(
     });
     --tid;
   }
-
   return content;
 }
 