@@ -58,18 +58,20 @@ proc fetchAndCheck(
     for n in 1u ..< ivReq.len:
       let header = ctx.dbHeaderPeek(ivReq.minPt + n).valueOr:
         # There is nothing one can do here
-        info "Block header missing, requesting reorg", ivReq, n,
+        info "Block header missing (reorg triggered)", ivReq, n,
           nth=(ivReq.minPt + n).bnStr
         # So require reorg
+        blk.blocks.setLen(offset)
         ctx.poolMode = true
         return false
       blockHash[n - 1] = header.parentHash
       blk.blocks[offset + n].header = header
     blk.blocks[offset].header = ctx.dbHeaderPeek(ivReq.minPt).valueOr:
       # There is nothing one can do here
-      info "Block header missing, requesting reorg", ivReq, n=0,
+      info "Block header missing (reorg triggered)", ivReq, n=0,
         nth=ivReq.minPt.bnStr
       # So require reorg
+      blk.blocks.setLen(offset)
       ctx.poolMode = true
       return false
     blockHash[ivReq.len - 1] =
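The fix above matters because `blk.blocks` is pre-extended for the whole request before the headers are looked up: on a missing header the tail beyond `offset` holds half-filled entries that must not survive into the reorg path. A minimal sketch of that invariant, using hypothetical stand-in types (`MiniBlock` and `fillOrTruncate` are illustrative only, not part of the codebase):

```nim
type
  MiniHeader = object
    number: uint64
  MiniBlock = object
    header: MiniHeader

proc fillOrTruncate(blocks: var seq[MiniBlock]; offset, reqLen: int): bool =
  ## Pre-extend for the whole request, then cut back to `offset` when a
  ## header turns out to be missing (mirrors the `setLen(offset)` fix.)
  blocks.setLen(offset + reqLen)
  for n in 0 ..< reqLen:
    if n == 2:                    # simulate `dbHeaderPeek()` failing here
      blocks.setLen(offset)       # drop the partially filled tail
      return false                # caller would now set `poolMode = true`
    blocks[offset + n].header = MiniHeader(number: uint64(offset + n))
  true

var blks: seq[MiniBlock]
doAssert not blks.fillOrTruncate(0, 4)
doAssert blks.len == 0            # no junk entries survive the failure
```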
@@ -100,15 +102,17 @@ proc fetchAndCheck(
         # Oops, cut off the rest
         blk.blocks.setLen(offset + n)
         buddy.fetchRegisterError()
-        trace info & ": fetch bodies cut off junk", peer=buddy.peer, ivReq,
-          n, nTxs=bodies[n].transactions.len, nBodies,
-          nRespErrors=buddy.only.nBdyRespErrors
+        trace info & ": cut off fetched junk", peer=buddy.peer, ivReq, n,
+          nTxs=bodies[n].transactions.len, nBodies, bdyErrors=buddy.bdyErrors
         break loop

       blk.blocks[offset + n].transactions = bodies[n].transactions
       blk.blocks[offset + n].uncles = bodies[n].uncles
       blk.blocks[offset + n].withdrawals = bodies[n].withdrawals

+      # Remove stashed header
+      ctx.dbHeaderUnstash blk.blocks[offset + n].header.number
+
   if offset < blk.blocks.len.uint64:
     return true

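With the added `dbHeaderUnstash` call, a stashed header is discarded as soon as its body has been merged, instead of being swept in a range loop after import (that loop is removed further down in `blocksStagedImport`). A toy model of this bookkeeping, assuming the stash behaves like a table keyed by block number (`unstash` is a hypothetical stand-in for the real `dbHeaderUnstash`):

```nim
import std/tables

var stash = initTable[uint64, string]()
stash[100u64] = "rlp-header-100"   # parked while the body is outstanding
stash[101u64] = "rlp-header-101"

proc unstash(stash: var Table[uint64, string]; bn: uint64) =
  ## Stand-in for `ctx.dbHeaderUnstash bn`: drop the header right after
  ## its body has been merged into the staged block record.
  stash.del bn

unstash(stash, 100u64)             # body for block 100 just arrived
doAssert 100u64 notin stash
doAssert 101u64 in stash           # block 101 still waits for its body
```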
@@ -122,33 +126,46 @@ proc fetchAndCheck(
 func blocksStagedCanImportOk*(ctx: BeaconCtxRef): bool =
   ## Check whether the queue is at its maximum size so import can start with
   ## a full queue.
-  if ctx.pool.blocksStagedQuLenMax <= ctx.blk.staged.len:
-    return true
+  ##
+  if ctx.poolMode:
+    # Re-org is scheduled
+    return false

   if 0 < ctx.blk.staged.len:
-    # Import if what is on the queue is all we have got.
-    if ctx.blocksUnprocIsEmpty() and ctx.blocksUnprocBorrowed() == 0:
-      return true
-    # Import if there is currently no peer active
-    if ctx.pool.nBuddies == 0:
+    # Import if what is on the queue is all we have got. Note that the
+    # function `blocksUnprocIsEmpty()` returns `true` only if all possible
+    # blocks have been fetched (i.e. the `borrowed` list is empty as well.)
+    if ctx.blocksUnprocIsEmpty():
       return true

+    # Make sure that the lowest block is already available. Or the other way
+    # round: no unprocessed block number range precedes the least staged block.
+    if ctx.blk.staged.ge(0).value.key < ctx.blocksUnprocTotalBottom():
+      # Also suggest importing blocks if there is currently no peer active.
+      # The `unprocessed` ranges will contain some higher-numbered block
+      # ranges, but these can be fetched later.
+      if ctx.pool.nBuddies == 0:
+        return true
+
+      # Start importing if the queue is long enough.
+      if ctx.pool.blocksStagedQuLenMax <= ctx.blk.staged.len:
+        return true
+
   false


 func blocksStagedFetchOk*(ctx: BeaconCtxRef): bool =
   ## Check whether body records can be fetched and stored on the `staged` queue.
   ##
-  let uBottom = ctx.blocksUnprocBottom()
-  if uBottom < high(BlockNumber):
+  if 0 < ctx.blocksUnprocAvail():
     # Not to start fetching while the queue is busy (i.e. larger than Lwm)
     # so that import might still be running strong.
     if ctx.blk.staged.len < ctx.pool.blocksStagedQuLenMax:
       return true

     # Make sure that there is no gap at the bottom which needs to be
-    # addressed regardless of the length of the queue.
-    if uBottom < ctx.blk.staged.ge(0).value.key:
+    # fetched regardless of the length of the queue.
+    if ctx.blocksUnprocAvailBottom() < ctx.blk.staged.ge(0).value.key:
       return true

   false
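The rewritten predicate now orders its checks: a pending reorg vetoes everything, a fully fetched range always imports, and otherwise import only starts when the staged queue is contiguous with what comes next and is either idle or full. A self-contained model of that decision, with the context object flattened into plain parameters (all names here are hypothetical simplifications):

```nim
proc canImport(poolMode: bool; staged, quLenMax, nBuddies: int;
               unprocEmpty: bool; leastStaged, unprocBottom: uint64): bool =
  ## Simplified re-statement of `blocksStagedCanImportOk()` above.
  if poolMode:
    return false                       # re-org scheduled: never import
  if 0 < staged:
    if unprocEmpty:
      return true                      # nothing more to fetch at all
    if leastStaged < unprocBottom:     # no unfetched gap below the queue
      if nBuddies == 0:
        return true                    # no peer is fetching anyway
      if quLenMax <= staged:
        return true                    # queue is long enough
  false

doAssert canImport(false, 4, 4, 2, false, leastStaged = 10u64,
                   unprocBottom = 20u64)
doAssert not canImport(true, 4, 4, 0, true, 10u64, 20u64)
```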
@@ -160,7 +177,7 @@ proc blocksStagedCollect*(
     ): Future[bool] {.async: (raises: []).} =
   ## Collect bodies and stage them.
   ##
-  if buddy.ctx.blocksUnprocIsEmpty():
+  if buddy.ctx.blocksUnprocAvail() == 0:
     # Nothing to do
     return false

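Switching the guard from `blocksUnprocIsEmpty()` to `blocksUnprocAvail() == 0` changes what the peer reacts to: availability counts only blocks that can be borrowed right now, while emptiness also accounts for intervals borrowed by other peers. A sketch of the distinction under that assumption (simplified counters, hypothetical names):

```nim
type Unproc = object
  queuedBlocks: uint64     # blocks that can be borrowed for fetching now
  borrowedBlocks: uint64   # blocks currently handed out to other peers

func avail(u: Unproc): uint64 = u.queuedBlocks
func isEmpty(u: Unproc): bool = u.queuedBlocks + u.borrowedBlocks == 0

let u = Unproc(queuedBlocks: 0, borrowedBlocks: 128)
doAssert u.avail == 0    # new test: nothing to hand out, skip collecting
doAssert not u.isEmpty   # old test would have let the peer carry on
```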
@@ -203,7 +220,7 @@ proc blocksStagedCollect*(
       if not await buddy.fetchAndCheck(ivReq, blk, info):
         if ctx.poolMode:
           # Reorg requested?
-          ctx.blocksUnprocCommit(iv.len, iv)
+          ctx.blocksUnprocCommit(iv, iv)
           return false

         haveError = true
@@ -222,7 +239,7 @@ proc blocksStagedCollect*(
             nStaged=ctx.blk.staged.len, ctrl=buddy.ctrl.state,
             bdyErrors=buddy.bdyErrors

-          ctx.blocksUnprocCommit(iv.len, iv)
+          ctx.blocksUnprocCommit(iv, iv)
           # At this stage allow a task switch so that some other peer might try
           # to work on the currently returned interval.
           try: await sleepAsync asyncThreadSwitchTimeSlot
@@ -234,22 +251,22 @@ proc blocksStagedCollect*(
         trace info & ": list partially failed", peer, iv, ivReq,
           unused=BnRange.new(ivBottom,iv.maxPt)
         # There is some left over to store back
-        ctx.blocksUnprocCommit(iv.len, ivBottom, iv.maxPt)
+        ctx.blocksUnprocCommit(iv, ivBottom, iv.maxPt)
         break

       # Update remaining interval
       let ivRespLen = blk.blocks.len - nBlkBlocks
       if iv.maxPt < ivBottom + ivRespLen.uint64:
         # All collected
-        ctx.blocksUnprocCommit(iv.len)
+        ctx.blocksUnprocCommit(iv)
         break

       ivBottom += ivRespLen.uint64 # will mostly result into `ivReq.maxPt+1`

       if buddy.ctrl.stopped:
         # There is some left over to store back. And `ivBottom <= iv.maxPt`
         # because of the check against `ivRespLen` above.
-        ctx.blocksUnprocCommit(iv.len, ivBottom, iv.maxPt)
+        ctx.blocksUnprocCommit(iv, ivBottom, iv.maxPt)
         break

   # Store `blk` chain on the `staged` queue
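All the `blocksUnprocCommit()` call sites in this proc now pass the borrowed interval `iv` itself rather than just `iv.len`, so the ledger can clear exactly that entry and optionally re-queue a leftover subrange. A toy version of that API change under simplified types (`UnprocLedger` and `BnIv` are stand-ins, not the real `BnRange` machinery):

```nim
import std/sequtils

type
  BnIv = tuple[minPt, maxPt: uint64]
  UnprocLedger = object
    borrowed: seq[BnIv]   # intervals currently handed out to peers
    queued: seq[BnIv]     # intervals still waiting to be fetched

proc commit(u: var UnprocLedger; iv: BnIv; loMin = 1u64; loMax = 0u64) =
  ## The exact borrowed interval is returned (the old API only received
  ## `iv.len`), plus an optional leftover subrange to store back.
  u.borrowed.keepItIf(it != iv)
  if loMin <= loMax:
    u.queued.add (minPt: loMin, maxPt: loMax)

var u = UnprocLedger()
u.borrowed.add (minPt: 100u64, maxPt: 199u64)
u.commit((minPt: 100u64, maxPt: 199u64), 150u64, 199u64)  # lower half done
doAssert u.borrowed.len == 0
doAssert u.queued == @[(minPt: 150u64, maxPt: 199u64)]
```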
@@ -276,21 +293,16 @@ proc blocksStagedImport*(
   ## Import/execute blocks record from staged queue
   ##
   let qItem = ctx.blk.staged.ge(0).valueOr:
+    # Empty queue
     return false

-  # Fetch least record, accept only if it matches the global ledger state
-  block:
-    let imported = ctx.chain.latestNumber()
-    if imported + 1 < qItem.key:
-      # If there is a gap, the `FC` module data area might have been re-set (or
-      # some problem occurred due to concurrent collection.) In any case, the
-      # missing block numbers are added to the range of blocks that need to be
-      # fetched.
-      ctx.blocksUnprocAmend(imported + 1, qItem.key - 1)
-      trace info & ": there is a gap L vs. staged",
-        B=ctx.chain.baseNumber.bnStr, L=imported.bnStr, staged=qItem.key.bnStr,
-        C=ctx.layout.coupler.bnStr
-      return false
+  # Make sure that the lowest block is already available. Or the other way
+  # round: no unprocessed block number range precedes the least staged block.
+  let uBottom = ctx.blocksUnprocTotalBottom()
+  if uBottom < qItem.key:
+    trace info & ": block queue not ready yet", nBuddies=ctx.pool.nBuddies,
+      unprocBottom=uBottom.bnStr, least=qItem.key.bnStr
+    return false

   # Remove from queue
   discard ctx.blk.staged.delete qItem.key
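The replaced gap-amend logic reduces to a single readiness test: import waits while any unfetched range still starts below the least staged record. A minimal restatement of that guard (hypothetical helper name):

```nim
proc importReady(leastStagedKey, unprocTotalBottom: uint64): bool =
  ## Wait while an unprocessed range begins below the least staged block.
  not (unprocTotalBottom < leastStagedKey)

doAssert importReady(leastStagedKey = 100u64, unprocTotalBottom = 100u64)
doAssert not importReady(leastStagedKey = 100u64, unprocTotalBottom = 64u64)
```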
@@ -306,32 +318,27 @@ proc blocksStagedImport*(
   var maxImport = iv.maxPt
   block importLoop:
     for n in 0 ..< nBlocks:
-      # It is known that `key <= imported + 1`. This means that some blocks
-      # potentially overlap with what is already known by `FC` (e.g. due to
-      # concurrently running `importBlock()` by `newPayload` RPC requests.)
-      #
-      # It is not left to `FC` to ignore this record. Passing a block before
-      # the `base` (which also might have changed) is responded by `FC` with
-      # an error. This would cause throwing away all `nBlocks` rather than
-      # ignoring the first few.
-      #
       let nBn = qItem.data.blocks[n].header.number
       if nBn <= ctx.chain.baseNumber:
-        trace info & ": ignoring block <= base", n, iv,
+        trace info & ": ignoring block less eq. base", n, iv,
           B=ctx.chain.baseNumber.bnStr, L=ctx.chain.latestNumber.bnStr,
           nthBn=nBn.bnStr, nthHash=qItem.data.getNthHash(n).short
         continue
       ctx.pool.chain.importBlock(qItem.data.blocks[n]).isOkOr:
-        warn info & ": import block error", n, iv,
+        # The way out here is simply to re-compile the block queue. At any
+        # point, the `FC` module data area might have been moved to a new
+        # canonical branch.
+        #
+        ctx.poolMode = true
+        warn info & ": import block error (reorg triggered)", n, iv,
           B=ctx.chain.baseNumber.bnStr, L=ctx.chain.latestNumber.bnStr,
-          nthBn=nBn.bnStr, nthHash=qItem.data.getNthHash(n).short, `error`=error
-        # Restore what is left over below
-        maxImport = ctx.chain.latestNumber()
+          nthBn=nBn.bnStr, nthHash=qItem.data.getNthHash(n).short,
+          `error`=error
         break importLoop

       # Allow pseudo/async thread switch.
       (await ctx.updateAsyncTasks()).isOkOr:
-        maxImport = ctx.chain.latestNumber()
+        maxImport = nBn # shutdown?
         break importLoop

       # Occasionally mark the chain finalized
@@ -343,29 +350,26 @@ proc blocksStagedImport*(

       doAssert nBn == ctx.chain.latestNumber()
       ctx.pool.chain.forkChoice(nthHash, finHash).isOkOr:
-        warn info & ": fork choice error", n, iv,
+        warn info & ": fork choice error (reorg triggered)", n, iv,
           B=ctx.chain.baseNumber.bnStr, L=ctx.chain.latestNumber.bnStr,
           F=ctx.layout.final.bnStr, nthBn=nBn.bnStr, nthHash=nthHash.short,
           finHash=(if finHash == nthHash: "nthHash" else: "F"), `error`=error
         # Restore what is left over below
-        maxImport = ctx.chain.latestNumber()
+        ctx.poolMode = true
         break importLoop

       # Allow pseudo/async thread switch.
       (await ctx.updateAsyncTasks()).isOkOr:
-        maxImport = ctx.chain.latestNumber()
+        maxImport = nBn # shutdown?
         break importLoop

   # Import probably incomplete, so a partial roll back may be needed
   if maxImport < iv.maxPt:
-    ctx.blocksUnprocCommit(0, maxImport+1, qItem.data.blocks[^1].header.number)
-
-    # Remove stashed headers for imported blocks
-    for bn in iv.minPt .. maxImport:
-      ctx.dbHeaderUnstash bn
+    ctx.blocksUnprocAppend(maxImport+1, iv.maxPt)

   info "Import done", iv, nBlocks, base=ctx.chain.baseNumber.bnStr,
     head=ctx.chain.latestNumber.bnStr, target=ctx.layout.final.bnStr
+
   return true

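The partial roll-back now appends the unimported tail back to the unprocessed set via `blocksUnprocAppend` instead of committing a pseudo-borrowed range, which fits the record having already been removed from the staged queue. A small model of the new tail handling (types simplified, names hypothetical):

```nim
type BnIv = tuple[minPt, maxPt: uint64]

proc rollbackTail(unproc: var seq[BnIv]; maxImport, ivMax: uint64) =
  ## Mirror of `blocksUnprocAppend(maxImport+1, iv.maxPt)` above: the
  ## unimported tail becomes a fresh unprocessed range.
  if maxImport < ivMax:
    unproc.add (minPt: maxImport + 1, maxPt: ivMax)

var unproc: seq[BnIv]
unproc.rollbackTail(maxImport = 149u64, ivMax = 199u64)
doAssert unproc == @[(minPt: 150u64, maxPt: 199u64)]
```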
@@ -382,7 +386,7 @@ proc blocksStagedReorg*(ctx: BeaconCtxRef; info: static[string]) =
   ## the block queues as there won't be much data cached, then.
   ##
   if ctx.blk.staged.len == 0 and
-     ctx.blocksUnprocChunks() == 0:
+     ctx.blocksUnprocIsEmpty():
     # nothing to do
     return

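Strengthening the guard from `blocksUnprocChunks() == 0` to `blocksUnprocIsEmpty()` means the early return only triggers when nothing at all is outstanding; an interval still borrowed by a peer keeps the clean-up path active. A sketch of the difference under simplified bookkeeping (hypothetical fields):

```nim
type UnprocState = object
  chunks: int     # queued intervals waiting to be fetched
  borrowed: int   # intervals currently out with peers

func unprocChunks(s: UnprocState): int = s.chunks
func unprocIsEmpty(s: UnprocState): bool =
  s.chunks == 0 and s.borrowed == 0

let s = UnprocState(chunks: 0, borrowed: 1)
doAssert s.unprocChunks == 0   # old guard: looks idle, would return early
doAssert not s.unprocIsEmpty   # new guard: borrowed work still counts
```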