Skip to content

Commit aef68f0

Browse files
author
Ibrahim Jarif
committed
fix(replay) - Update head for LSM entries also (#1456)
#1372 tried to fix the `replay from start` issue, but it only partially fixed it. The head was not being updated in the case where all the entries were inserted only into the LSM tree. This commit fixes that. (cherry picked from commit 4c8fe7f)
1 parent 7d169b2 commit aef68f0

File tree

3 files changed

+52
-12
lines changed

3 files changed

+52
-12
lines changed

backup_test.go

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -112,6 +112,7 @@ func TestBackupRestore1(t *testing.T) {
112112
return nil
113113
})
114114
require.NoError(t, err)
115+
require.Equal(t, db.orc.nextTs(), uint64(3))
115116
}
116117

117118
func TestBackupRestore2(t *testing.T) {
@@ -163,6 +164,9 @@ func TestBackupRestore2(t *testing.T) {
163164
err = db2.Load(&backup, 16)
164165
require.NoError(t, err)
165166

167+
// Check nextTs is correctly set.
168+
require.Equal(t, db1.orc.nextTs(), db2.orc.nextTs())
169+
166170
for i := byte(1); i < N; i++ {
167171
err = db2.View(func(tx *Txn) error {
168172
k := append(key1, i)
@@ -210,6 +214,9 @@ func TestBackupRestore2(t *testing.T) {
210214
err = db3.Load(&backup, 16)
211215
require.NoError(t, err)
212216

217+
// Check nextTs is correctly set.
218+
require.Equal(t, db2.orc.nextTs(), db3.orc.nextTs())
219+
213220
for i := byte(1); i < N; i++ {
214221
err = db3.View(func(tx *Txn) error {
215222
k := append(key1, i)
@@ -325,6 +332,7 @@ func TestBackupRestore3(t *testing.T) {
325332
N := 1000
326333
entries := createEntries(N)
327334

335+
var db1NextTs uint64
328336
// backup
329337
{
330338
db1, err := Open(DefaultOptions(filepath.Join(tmpdir, "backup1")))
@@ -335,6 +343,8 @@ func TestBackupRestore3(t *testing.T) {
335343

336344
_, err = db1.Backup(&bb, 0)
337345
require.NoError(t, err)
346+
347+
db1NextTs = db1.orc.nextTs()
338348
require.NoError(t, db1.Close())
339349
}
340350
require.True(t, len(entries) == N)
@@ -345,7 +355,9 @@ func TestBackupRestore3(t *testing.T) {
345355
require.NoError(t, err)
346356

347357
defer db2.Close()
358+
require.NotEqual(t, db1NextTs, db2.orc.nextTs())
348359
require.NoError(t, db2.Load(&bb, 16))
360+
require.Equal(t, db1NextTs, db2.orc.nextTs())
349361

350362
// verify
351363
err = db2.View(func(txn *Txn) error {
@@ -383,6 +395,7 @@ func TestBackupLoadIncremental(t *testing.T) {
383395
updates := make(map[int]byte)
384396
var bb bytes.Buffer
385397

398+
var db1NextTs uint64
386399
// backup
387400
{
388401
db1, err := Open(DefaultOptions(filepath.Join(tmpdir, "backup2")))
@@ -439,6 +452,9 @@ func TestBackupLoadIncremental(t *testing.T) {
439452
require.NoError(t, err)
440453
_, err = db1.Backup(&bb, since)
441454
require.NoError(t, err)
455+
456+
db1NextTs = db1.orc.nextTs()
457+
442458
require.NoError(t, db1.Close())
443459
}
444460
require.True(t, len(entries) == N)
@@ -450,7 +466,9 @@ func TestBackupLoadIncremental(t *testing.T) {
450466

451467
defer db2.Close()
452468

469+
require.NotEqual(t, db1NextTs, db2.orc.nextTs())
453470
require.NoError(t, db2.Load(&bb, 16))
471+
require.Equal(t, db1NextTs, db2.orc.nextTs())
454472

455473
// verify
456474
actual := make(map[int]byte)
@@ -517,6 +535,8 @@ func TestBackupBitClear(t *testing.T) {
517535
_, err = db.Backup(bak, 0)
518536
require.NoError(t, err)
519537
require.NoError(t, bak.Close())
538+
539+
oldValue := db.orc.nextTs()
520540
require.NoError(t, db.Close())
521541

522542
opt = getTestOptions(dir)
@@ -530,6 +550,8 @@ func TestBackupBitClear(t *testing.T) {
530550
defer bak.Close()
531551

532552
require.NoError(t, db.Load(bak, 16))
553+
// Ensure nextTs is still the same.
554+
require.Equal(t, oldValue, db.orc.nextTs())
533555

534556
require.NoError(t, db.View(func(txn *Txn) error {
535557
e, err := txn.Get(key)

db.go

Lines changed: 27 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -136,11 +136,11 @@ func (db *DB) replayFunction() func(Entry, valuePointer) error {
136136
} else {
137137
nv = vp.Encode()
138138
meta = meta | bitValuePointer
139-
// Update vhead. If the crash happens while replay was in progess
140-
// and the head is not updated, we will end up replaying all the
141-
// files again.
142-
db.updateHead([]valuePointer{vp})
143139
}
140+
// Update vhead. If the crash happens while replay was in progess
141+
// and the head is not updated, we will end up replaying all the
142+
// files starting from file zero, again.
143+
db.updateHead([]valuePointer{vp})
144144

145145
v := y.ValueStruct{
146146
Value: nv,
@@ -994,24 +994,41 @@ type flushTask struct {
994994
dropPrefixes [][]byte
995995
}
996996

997-
// handleFlushTask must be run serially.
998-
func (db *DB) handleFlushTask(ft flushTask) error {
999-
// There can be a scenario, when empty memtable is flushed. For example, memtable is empty and
1000-
// after writing request to value log, rotation count exceeds db.LogRotatesToFlush.
1001-
if ft.mt.Empty() {
997+
func (db *DB) pushHead(ft flushTask) error {
998+
// We don't need to store head pointer in the in-memory mode since we will
999+
// never be replay anything.
1000+
if db.opt.InMemory {
10021001
return nil
10031002
}
1003+
// Ensure we never push a zero valued head pointer.
1004+
if ft.vptr.IsZero() {
1005+
return errors.New("Head should not be zero")
1006+
}
10041007

10051008
// Store badger head even if vptr is zero, need it for readTs
10061009
db.opt.Debugf("Storing value log head: %+v\n", ft.vptr)
1007-
db.opt.Debugf("Storing offset: %+v\n", ft.vptr)
10081010
val := ft.vptr.Encode()
10091011

10101012
// Pick the max commit ts, so in case of crash, our read ts would be higher than all the
10111013
// commits.
10121014
headTs := y.KeyWithTs(head, db.orc.nextTs())
10131015
ft.mt.Put(headTs, y.ValueStruct{Value: val})
10141016

1017+
return nil
1018+
}
1019+
1020+
// handleFlushTask must be run serially.
1021+
func (db *DB) handleFlushTask(ft flushTask) error {
1022+
// There can be a scenario, when empty memtable is flushed. For example, memtable is empty and
1023+
// after writing request to value log, rotation count exceeds db.LogRotatesToFlush.
1024+
if ft.mt.Empty() {
1025+
return nil
1026+
}
1027+
1028+
if err := db.pushHead(ft); err != nil {
1029+
return err
1030+
}
1031+
10151032
dk, err := db.registry.latestDataKey()
10161033
if err != nil {
10171034
return y.Wrapf(err, "failed to get datakey in db.handleFlushTask")

db_test.go

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2001,8 +2001,9 @@ func TestNoCrash(t *testing.T) {
20012001
}
20022002

20032003
db.Lock()
2004-
// make head to point to first file
2005-
db.vhead = valuePointer{0, 0, 0}
2004+
// make head to point to second file. We cannot make it point to the first
2005+
// vlog file because we cannot push a zero head pointer.
2006+
db.vhead = valuePointer{1, 0, 0}
20062007
db.Unlock()
20072008
db.Close()
20082009

0 commit comments

Comments
 (0)