Skip to content

Commit 1cbc86a

Browse files
committed
additional logic for historic plugin
Signed-off-by: Atanas Atanasov <a.v.atanasov98@gmail.com>
1 parent c0ccbb9 commit 1cbc86a

File tree

2 files changed

+45
-1
lines changed

2 files changed

+45
-1
lines changed

block-node/block-providers/files.historic/src/main/java/org/hiero/block/node/blocks/files/historic/BlocksFilesHistoricPlugin.java

Lines changed: 35 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@
88
import java.io.IOException;
99
import java.lang.System.Logger.Level;
1010
import java.nio.file.Files;
11+
import java.nio.file.Path;
1112
import java.util.List;
1213
import java.util.Objects;
1314
import java.util.concurrent.CopyOnWriteArrayList;
@@ -44,6 +45,8 @@ public final class BlocksFilesHistoricPlugin implements BlockProviderPlugin, Blo
4445
private final CopyOnWriteArrayList<LongRange> inProgressZipRanges = new CopyOnWriteArrayList<>();
4546
/** Running total of bytes stored in the historic tier */
4647
private final AtomicLong totalBytesStored = new AtomicLong(0);
48+
/** The config used for this plugin */
49+
private FilesHistoricConfig config;
4750
/** The Storage Retention Policy Threshold */
4851
private long retentionPolicyThreshold;
4952

@@ -74,7 +77,7 @@ public List<Class<? extends Record>> configDataTypes() {
7477
@Override
7578
public void init(final BlockNodeContext context, final ServiceBuilder serviceBuilder) {
7679
this.context = Objects.requireNonNull(context);
77-
final FilesHistoricConfig config = context.configuration().getConfigData(FilesHistoricConfig.class);
80+
this.config = context.configuration().getConfigData(FilesHistoricConfig.class);
7881
this.retentionPolicyThreshold = context.storageRetentionPolicyThreshold();
7982
// Initialize metrics
8083
initMetrics(context.metrics());
@@ -198,6 +201,11 @@ public BlockRangeSet availableBlocks() {
198201
public void handlePersisted(PersistedNotification notification) {
199202
if (notification.blockProviderPriority() > defaultPriority()) {
200203
attemptZipping();
204+
// @todo(1268) like in the recents plugin, should we cleanup only if the priority check passes?
205+
// here I think yes, because we only get new data if the check passes, so there is no issue
206+
// to keep within the bounds of the retention policy threshold. But for the recents plugin,
207+
// maybe we need to think about it since it gets new data via the verification notification.
208+
cleanup();
201209
} // todo this is not enough of an assertion that the blocks will be coming from the right place
202210
// as notifications are async and things can happen, when we get the accessors later, we should
203211
// be able to get accessors only from places that have higher priority than us. We should probably
@@ -226,6 +234,32 @@ private void attemptZipping() {
226234
}
227235
}
228236

237+
private void cleanup() {
238+
final long totalStored = availableBlocks.size();
239+
long excess = totalStored - retentionPolicyThreshold;
240+
while (excess >= numberOfBlocksPerZipFile) {
241+
// if we have passed the above check, we can delete at least one zip file
242+
// we assume there are no gaps in the zips, the number of blocks per zip file
243+
// setting is not possible to change after starting the system for the first time,
244+
// also the number of blocks per zip file is a power of ten, so we can safely
245+
// say that whatever the range is, it will always start/end with a predictable number
246+
// e.g. 0-9, 10-19, 20-29 (batch 10s) or 10_000-19_999, 20_000-29_999 (batch 1000s) etc.
247+
// depending on the setting
248+
final long minBlockNumberStored = availableBlocks.min();
249+
// no need to compute existing below, we need the path to the zip file, we do not need to
250+
// check if it exists, moreover we do not need to know actual block compression type.
251+
final Path zipToDelete =
252+
BlockPath.computeBlockPath(config, minBlockNumberStored).zipFilePath();
253+
try {
254+
Files.deleteIfExists(zipToDelete);
255+
} catch (final IOException e) {
256+
throw new RuntimeException(e);
257+
// @todo(1268) what to do here if we cannot delete the zip file?
258+
}
259+
excess -= numberOfBlocksPerZipFile;
260+
}
261+
}
262+
229263
/**
230264
* Start moving a batch of blocks to a zip file in background as long as batch is not already in progress or queued
231265
* to be started.

block-node/block-providers/files.recent/src/main/java/org/hiero/block/node/blocks/files/recent/BlocksFilesRecentPlugin.java

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -258,6 +258,16 @@ public void handlePersisted(PersistedNotification notification) {
258258
long excess = totalStored - retentionPolicyThreshold;
259259
availableBlocks.stream().sorted().limit(excess).forEach(this::delete);
260260
}
261+
// @todo(1268) Not sure if we still would want to filter by notification provider priority.
262+
// In theory the new retention logic no longer supports that. We no longer delete the
263+
// range of the notification. More than that, if we have only this plugin that will fire
264+
// such notifications, effectively we will never clean up. I think that should also be
265+
// changed. Another way to do this is to delete in the verification notification method.
266+
// In theory from there we get the new data and the idea of the policy is to be bound to
267+
// block count and delete only if new data arrives. Maybe we could drop the handle
268+
// persisted here altogether? But we have to be careful because a big delete might
269+
// take a long while and we do not want to hang the notification thread too long because
270+
// persistence relies on it (verification notification method). Am I missing something?
261271
}
262272

263273
// ==== Action Methods =============================================================================================

0 commit comments

Comments
 (0)