Changes from all commits
4 changes: 2 additions & 2 deletions badger/cmd/bank.go
@@ -206,7 +206,7 @@ func get(txn *badger.Txn, k []byte) (*badger.Item, error) {
return nil, badger.ErrKeyNotFound
}

-// seekTotal retrives the total of all accounts by seeking for each account key.
+// seekTotal retrieves the total of all accounts by seeking for each account key.
func seekTotal(txn *badger.Txn) ([]account, error) {
expected := uint64(numAccounts) * initialBal
var accounts []account
@@ -298,7 +298,7 @@ func compareTwo(db *badger.DB, before, after uint64) {

func runDisect(cmd *cobra.Command, args []string) error {
// The total did not match up. So, let's disect the DB to find the
-// transction which caused the total mismatch.
+// transaction which caused the total mismatch.
db, err := badger.OpenManaged(badger.DefaultOptions(sstDir).
WithValueDir(vlogDir).
WithReadOnly(true).
2 changes: 1 addition & 1 deletion badger/cmd/read_bench.go
@@ -185,7 +185,7 @@ func getSampleKeys(db *badger.DB, sampleSize int) ([][]byte, error) {
count := 0
stream := db.NewStreamAt(math.MaxUint64)

-// overide stream.KeyToList as we only want keys. Also
+// override stream.KeyToList as we only want keys. Also
// we can take only first version for the key.
stream.KeyToList = func(key []byte, itr *badger.Iterator) (*pb.KVList, error) {
l := &pb.KVList{}
2 changes: 1 addition & 1 deletion badger/cmd/write_bench.go
@@ -102,7 +102,7 @@ func init() {
"If it is true, badger will encrypt all the data stored on the disk.")
writeBenchCmd.Flags().BoolVar(&wo.loadBloomsOnOpen, "load-blooms", true,
"Load Bloom filter on DB open.")
-writeBenchCmd.Flags().BoolVar(&wo.detectConflicts, "conficts", false,
+writeBenchCmd.Flags().BoolVar(&wo.detectConflicts, "conflicts", false,
"If true, it badger will detect the conflicts")
writeBenchCmd.Flags().BoolVar(&wo.zstdComp, "zstd", false,
"If true, badger will use ZSTD mode. Otherwise, use default.")
2 changes: 1 addition & 1 deletion db.go
@@ -1700,7 +1700,7 @@ func (db *DB) dropAll() (func(), error) {
return f, err
}
// prepareToDrop will stop all the incoming write and flushes any pending memtables.
-// Before we drop, we'll stop the compaction because anyways all the datas are going to
+// Before we drop, we'll stop the compaction because anyways all the data are going to
// be deleted.
db.stopCompactions()
resume := func() {
4 changes: 2 additions & 2 deletions db2_test.go
@@ -605,7 +605,7 @@ func TestL0GCBug(t *testing.T) {
t.Fatalf(err.Error())
}
}
-// Ensure alteast one GC call was successful.
+// Ensure at least one GC call was successful.
require.NotZero(t, success)
// CheckKeys reads all the keys previously stored.
checkKeys := func(db *DB) {
@@ -700,7 +700,7 @@ func TestWindowsDataLoss(t *testing.T) {
require.NoError(t, db.valueDirGuard.release())
}
// Don't use vlog.Close here. We don't want to fix the file size. Only un-mmap
-// the data so that we can truncate the file durning the next vlog.Open.
+// the data so that we can truncate the file during the next vlog.Open.
require.NoError(t, z.Munmap(db.vlog.filesMap[db.vlog.maxFid].Data))
for _, f := range db.vlog.filesMap {
require.NoError(t, f.Fd.Close())
6 changes: 3 additions & 3 deletions db_test.go
@@ -34,7 +34,7 @@ import (
// have occurred on the channel `ch`. We log messages or generate errors using `t`.
func waitForMessage(ch chan string, expected string, count int, timeout int, t *testing.T) {
if count <= 0 {
-t.Logf("Will skip waiting for %s since exected count <= 0.",
+t.Logf("Will skip waiting for %s since expected count <= 0.",
expected)
return
}
@@ -504,7 +504,7 @@ func dirSize(path string) (int64, error) {
// New keys are created with each for-loop iteration. During each
// iteration, the previous for-loop iteration's keys are deleted.
//
-// To reproduce continous growth problem due to `badgerMove` keys,
+// To reproduce continuous growth problem due to `badgerMove` keys,
// update `value.go` `discardEntry` line 1628 to return false
//
// Also with PR #1303, the delete keys are properly cleaned which
@@ -2169,7 +2169,7 @@ func TestForceFlushMemtable(t *testing.T) {
ops.ValueLogMaxEntries = 1

db, err := Open(ops)
-require.NoError(t, err, "error while openning db")
+require.NoError(t, err, "error while opening db")
defer func() { require.NoError(t, db.Close()) }()

for i := 0; i < 3; i++ {
2 changes: 1 addition & 1 deletion dir_windows.go
@@ -74,7 +74,7 @@ func acquireDirectoryLock(dirPath string, pidFileName string, readOnly bool) (*d
// FILE_ATTRIBUTE_TEMPORARY is used to tell Windows to try to create the handle in memory.
// FILE_FLAG_DELETE_ON_CLOSE is not specified in syscall_windows.go but tells Windows to delete
// the file when all processes holding the handler are closed.
-// XXX: this works but it's a bit klunky. i'd prefer to use LockFileEx but it needs unsafe pkg.
+// XXX: this works but it's a bit clunky. i'd prefer to use LockFileEx but it needs unsafe pkg.
h, err := syscall.CreateFile(
syscall.StringToUTF16Ptr(absLockFilePath), 0, 0, nil,
syscall.OPEN_ALWAYS,
2 changes: 1 addition & 1 deletion errors.go
@@ -33,7 +33,7 @@ var (
// ErrReadOnlyTxn is returned if an update function is called on a read-only transaction.
ErrReadOnlyTxn = stderrors.New("No sets or deletes are allowed in a read-only transaction")

-// ErrDiscardedTxn is returned if a previously discarded transaction is re-used.
+// ErrDiscardedTxn is returned if a previously discarded transaction is reused.
ErrDiscardedTxn = stderrors.New("This transaction has been discarded. Create a new one")

// ErrEmptyKey is returned if an empty key is passed on an update function.
4 changes: 2 additions & 2 deletions key_registry.go
@@ -246,7 +246,7 @@ func WriteKeyRegistry(reg *KeyRegistry, opt KeyRegistryOptions) error {
var err error
eSanity, err = y.XORBlockAllocate(eSanity, opt.EncryptionKey, iv)
if err != nil {
-return y.Wrapf(err, "Error while encrpting sanity text in WriteKeyRegistry")
+return y.Wrapf(err, "Error while encrypting sanity text in WriteKeyRegistry")
}
}
y.Check2(buf.Write(iv))
@@ -310,7 +310,7 @@ func (kr *KeyRegistry) LatestDataKey() (*pb.DataKey, error) {
// validKey return datakey if the last generated key duration less than
// rotation duration.
validKey := func() (*pb.DataKey, bool) {
-// Time diffrence from the last generated time.
+// Time difference from the last generated time.
diff := time.Since(time.Unix(kr.lastCreated, 0))
if diff < kr.opt.EncryptionKeyRotationDuration {
return kr.dataKeys[kr.nextKeyID], true
2 changes: 1 addition & 1 deletion levels.go
@@ -1573,7 +1573,7 @@ func (s *levelsController) addLevel0Table(t *table.Table) error {
}

for !s.levels[0].tryAddLevel0Table(t) {
-// Before we unstall, we need to make sure that level 0 is healthy.
+// Before we uninstall, we need to make sure that level 0 is healthy.
timeStart := time.Now()
for s.levels[0].numTables() >= s.kv.opt.NumLevelZeroTablesStall {
time.Sleep(10 * time.Millisecond)
4 changes: 2 additions & 2 deletions stream_writer_test.go
@@ -458,7 +458,7 @@ func TestSendOnClosedStream(t *testing.T) {

// Defer for panic.
defer func() {
-require.NotNil(t, recover(), "should have paniced")
+require.NotNil(t, recover(), "should have panicked")
require.NoError(t, sw.Flush())
require.NoError(t, db.Close())
}()
@@ -514,7 +514,7 @@ func TestSendOnClosedStream2(t *testing.T) {

// Defer for panic.
defer func() {
-require.NotNil(t, recover(), "should have paniced")
+require.NotNil(t, recover(), "should have panicked")
require.NoError(t, sw.Flush())
require.NoError(t, db.Close())
}()
2 changes: 1 addition & 1 deletion structs.go
@@ -65,7 +65,7 @@ const (
maxHeaderSize = 22
)

-// Encode encodes the header into []byte. The provided []byte should be atleast 5 bytes. The
+// Encode encodes the header into []byte. The provided []byte should be at least 5 bytes. The
// function will panic if out []byte isn't large enough to hold all the values.
// The encoded header looks like
// +------+----------+------------+--------------+-----------+
2 changes: 1 addition & 1 deletion table/builder.go
@@ -356,7 +356,7 @@ func (b *Builder) addInternal(key []byte, value y.ValueStruct, valueLen uint32,

// ReachedCapacity returns true if we... roughly (?) reached capacity?
func (b *Builder) ReachedCapacity() bool {
-// If encryption/compression is enabled then use the compresssed size.
+// If encryption/compression is enabled then use the compressed size.
sumBlockSizes := b.compressedSize.Load()
if b.opts.Compression == options.None && b.opts.DataKey == nil {
sumBlockSizes = b.uncompressedSize.Load()
2 changes: 1 addition & 1 deletion table/builder_test.go
@@ -38,7 +38,7 @@ func TestTableIndex(t *testing.T) {
opts Options
}{
{
-name: "No encyption/compression",
+name: "No encryption/compression",
opts: Options{
BlockSize: 4 * 1024,
BloomFalsePositive: 0.01,
4 changes: 2 additions & 2 deletions table/table.go
@@ -214,7 +214,7 @@ func (b *Block) decrRef() {
return
}

-// Insert the []byte into pool only if the block is resuable. When a block
+// Insert the []byte into pool only if the block is reusable. When a block
// is reusable a new []byte is used for decompression and this []byte can
// be reused.
// In case of an uncompressed block, the []byte is a reference to the
@@ -332,7 +332,7 @@ func OpenInMemoryTable(data []byte, id uint64, opt *Options) (*Table, error) {
}

func (t *Table) initBiggestAndSmallest() error {
-// This defer will help gathering debugging info incase initIndex crashes.
+// This defer will help gathering debugging info in case initIndex crashes.
defer func() {
if r := recover(); r != nil {
// Use defer for printing info because there may be an intermediate panic.
4 changes: 2 additions & 2 deletions test_extensions.go
@@ -45,7 +45,7 @@ type testOnlyDBExtensions struct {
// logToSyncChan sends a message to the DB's syncChan. Note that we expect
// that the DB never closes this channel; the responsibility for
// allocating and closing the channel belongs to the test module.
-// if db.syncChan is nil or has never been initialized, ths will be
+// if db.syncChan is nil or has never been initialized, this will be
// silently ignored.
func (db *DB) logToSyncChan(msg string) {
if db.syncChan != nil {
@@ -55,7 +55,7 @@ func (db *DB) logToSyncChan(msg string) {

// captureDiscardStats will copy the contents of the discardStats file
// maintained by vlog to the onCloseDiscardCapture map specified by
-// db.opt. Of couse, if db.opt.onCloseDiscardCapture is nil (as expected
+// db.opt. Of course, if db.opt.onCloseDiscardCapture is nil (as expected
// for a production system as opposed to a test system), this is a no-op.
func (db *DB) captureDiscardStats() {
if db.onCloseDiscardCapture != nil {
2 changes: 1 addition & 1 deletion value.go
@@ -252,7 +252,7 @@ func (vlog *valueLog) rewrite(f *logFile) error {
//
// NOTE: moveKeyi is the gc'ed version of the original key with version i
// We're calling the gc'ed keys as moveKey to simplify the
-// explanantion. We used to add move keys but we no longer do that.
+// explanation. We used to add move keys but we no longer do that.
//
// Assume we have 3 move keys in L0.
// - moveKey1 (points to vlog file 10),
2 changes: 1 addition & 1 deletion value_test.go
@@ -1050,7 +1050,7 @@ func TestValueLogTruncate(t *testing.T) {
fileCountAfterCorruption := len(db.Tables()) + len(db.imm) + 1 // +1 for db.mt
// We should have one memtable and one sst file.
require.Equal(t, fileCountBeforeCorruption+1, fileCountAfterCorruption)
-// maxFid will be 2 because we increment the max fid on DB open everytime.
+// maxFid will be 2 because we increment the max fid on DB open every time.
require.Equal(t, 2, int(db.vlog.maxFid))
require.NoError(t, db.Close())
}
2 changes: 1 addition & 1 deletion y/watermark.go
@@ -119,7 +119,7 @@ func (w *WaterMark) WaitForMark(ctx context.Context, index uint64) error {
// process is used to process the Mark channel. This is not thread-safe,
// so only run one goroutine for process. One is sufficient, because
// all goroutine ops use purely memory and cpu.
-// Each index has to emit atleast one begin watermark in serial order otherwise waiters
+// Each index has to emit at least one begin watermark in serial order otherwise waiters
// can get blocked idefinitely. Example: We had an watermark at 100 and a waiter at 101,
// if no watermark is emitted at index 101 then waiter would get stuck indefinitely as it
// can't decide whether the task at 101 has decided not to emit watermark or it didn't get