Skip to content

Commit e40febd

Browse files
committed
Fixed vault-analysis-json chopping data
1 parent db2f1d1 commit e40febd

File tree

4 files changed

+10043
-3325
lines changed

4 files changed

+10043
-3325
lines changed

docs/source/tutorials/erc-4626-single-vault.ipynb

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3993,7 +3993,7 @@
39933993
},
39943994
{
39953995
"cell_type": "code",
3996-
"execution_count": 2,
3996+
"execution_count": null,
39973997
"id": "9225d6651c0a6852",
39983998
"metadata": {
39993999
"ExecuteTime": {
@@ -4011,7 +4011,6 @@
40114011
}
40124012
],
40134013
"source": [
4014-
"import pickle\n",
40154014
"from pathlib import Path\n",
40164015
"\n",
40174016
"from eth_defi.vault.vaultdb import VaultDatabase\n",

docs/source/tutorials/erc-4626-vaults-per-chain.ipynb

Lines changed: 10036 additions & 3308 deletions
Large diffs are not rendered by default.

eth_defi/research/vault_metrics.py

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -497,8 +497,11 @@ def process_vault_group(group):
497497
# We may have several division by zero if the share price starts at 0
498498
warnings.simplefilter("ignore", RuntimeWarning)
499499

500-
lifetime_start_date = start_date = group.index.min()
501-
lifetime_end_date = end_date = group.index.max()
500+
# 2) Ensure group index is monotonic and clean
501+
group = group.loc[~group.index.isna()].sort_index(kind="stable")
502+
503+
lifetime_start_date = start_date = group.index[0]
504+
lifetime_end_date = end_date = group.index[-1]
502505

503506
lifetime_return = group.iloc[-1]["share_price"] / group.iloc[0]["share_price"] - 1
504507

scripts/erc-4626/vault-analysis-json.py

Lines changed: 1 addition & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -192,28 +192,16 @@ def find_non_serializable_paths(obj, path=None, results=None):
192192
vault_db = VaultDatabase.read()
193193
cleaned_data_parquet_file = PARQUET_FILE
194194
prices_df = pd.read_parquet(cleaned_data_parquet_file)
195-
196195
print(f"We have {len(vault_db):,} vaults in the database and {len(prices_df):,} price rows.")
197196

198-
# --------------------------------------------------------------------
199-
# Step 3: Filter data for the last N months
200-
# --------------------------------------------------------------------
201-
last_sample_at = prices_df.index[-1] # Latest timestamp
202-
three_months_ago = last_sample_at - pd.DateOffset(months=MONTHS)
203-
PERIOD = [three_months_ago, last_sample_at]
204-
205-
mask = (prices_df.index >= PERIOD[0]) & (prices_df.index <= PERIOD[1])
206-
prices_df = prices_df[mask]
207-
print(f"✅ Trimmed to {len(prices_df):,} rows from {PERIOD[0]} to {PERIOD[1]}")
208-
209197
# --------------------------------------------------------------------
210198
# Step 4: Examine per-chain data availability
211199
# --------------------------------------------------------------------
212200
chain_ids = sorted(prices_df["chain"].unique())
213201
for chain_id in chain_ids:
214202
chain_name = get_chain_name(chain_id)
215203
print(f"\n🔍 Examining chain {chain_name} ({chain_id})")
216-
chain_prices_df = prices_df[(prices_df["chain"] == chain_id) & (prices_df.index >= PERIOD[0]) & (prices_df.index <= PERIOD[1])]
204+
chain_prices_df = prices_df[(prices_df["chain"] == chain_id)]
217205
print(f"📈 Rows: {len(chain_prices_df):,} for chain {chain_name}")
218206
if not chain_prices_df.empty:
219207
print(chain_prices_df.head(1))

0 commit comments

Comments
 (0)