diff --git a/badger/cmd/bank.go b/badger/cmd/bank.go
index a314c36f1..e2954bd2f 100644
--- a/badger/cmd/bank.go
+++ b/badger/cmd/bank.go
@@ -206,7 +206,7 @@ func get(txn *badger.Txn, k []byte) (*badger.Item, error) {
 	return nil, badger.ErrKeyNotFound
 }
 
-// seekTotal retrives the total of all accounts by seeking for each account key.
+// seekTotal retrieves the total of all accounts by seeking for each account key.
 func seekTotal(txn *badger.Txn) ([]account, error) {
 	expected := uint64(numAccounts) * initialBal
 	var accounts []account
@@ -298,7 +298,7 @@ func compareTwo(db *badger.DB, before, after uint64) {
 
 func runDisect(cmd *cobra.Command, args []string) error {
 	// The total did not match up. So, let's disect the DB to find the
-	// transction which caused the total mismatch.
+	// transaction which caused the total mismatch.
 	db, err := badger.OpenManaged(badger.DefaultOptions(sstDir).
 		WithValueDir(vlogDir).
 		WithReadOnly(true).
diff --git a/badger/cmd/read_bench.go b/badger/cmd/read_bench.go
index fd158156c..f1038689b 100644
--- a/badger/cmd/read_bench.go
+++ b/badger/cmd/read_bench.go
@@ -185,7 +185,7 @@ func getSampleKeys(db *badger.DB, sampleSize int) ([][]byte, error) {
 	count := 0
 	stream := db.NewStreamAt(math.MaxUint64)
 
-	// overide stream.KeyToList as we only want keys. Also
+	// override stream.KeyToList as we only want keys. Also
 	// we can take only first version for the key.
 	stream.KeyToList = func(key []byte, itr *badger.Iterator) (*pb.KVList, error) {
 		l := &pb.KVList{}
diff --git a/badger/cmd/write_bench.go b/badger/cmd/write_bench.go
index fc8a86bc9..b7863bbdf 100644
--- a/badger/cmd/write_bench.go
+++ b/badger/cmd/write_bench.go
@@ -102,7 +102,7 @@ func init() {
 		"If it is true, badger will encrypt all the data stored on the disk.")
 	writeBenchCmd.Flags().BoolVar(&wo.loadBloomsOnOpen, "load-blooms", true,
 		"Load Bloom filter on DB open.")
-	writeBenchCmd.Flags().BoolVar(&wo.detectConflicts, "conficts", false,
+	writeBenchCmd.Flags().BoolVar(&wo.detectConflicts, "conflicts", false,
 		"If true, it badger will detect the conflicts")
 	writeBenchCmd.Flags().BoolVar(&wo.zstdComp, "zstd", false,
 		"If true, badger will use ZSTD mode. Otherwise, use default.")
diff --git a/db.go b/db.go
index f30949ebf..282a04793 100644
--- a/db.go
+++ b/db.go
@@ -1700,7 +1700,7 @@ func (db *DB) dropAll() (func(), error) {
 		return f, err
 	}
 	// prepareToDrop will stop all the incoming write and flushes any pending memtables.
-	// Before we drop, we'll stop the compaction because anyways all the datas are going to
+	// Before we drop, we'll stop the compaction because anyways all the data are going to
 	// be deleted.
 	db.stopCompactions()
 	resume := func() {
diff --git a/db2_test.go b/db2_test.go
index f541a628d..cfb0552d8 100644
--- a/db2_test.go
+++ b/db2_test.go
@@ -605,7 +605,7 @@ func TestL0GCBug(t *testing.T) {
 			t.Fatalf(err.Error())
 		}
 	}
-	// Ensure alteast one GC call was successful.
+	// Ensure at least one GC call was successful.
 	require.NotZero(t, success)
 	// CheckKeys reads all the keys previously stored.
 	checkKeys := func(db *DB) {
@@ -700,7 +700,7 @@ func TestWindowsDataLoss(t *testing.T) {
 		require.NoError(t, db.valueDirGuard.release())
 	}
 	// Don't use vlog.Close here. We don't want to fix the file size. Only un-mmap
-	// the data so that we can truncate the file durning the next vlog.Open.
+	// the data so that we can truncate the file during the next vlog.Open.
 	require.NoError(t, z.Munmap(db.vlog.filesMap[db.vlog.maxFid].Data))
 	for _, f := range db.vlog.filesMap {
 		require.NoError(t, f.Fd.Close())
diff --git a/db_test.go b/db_test.go
index 21293467d..17c4be847 100644
--- a/db_test.go
+++ b/db_test.go
@@ -34,7 +34,7 @@ import (
 // have occurred on the channel `ch`. We log messages or generate errors using `t`.
 func waitForMessage(ch chan string, expected string, count int, timeout int, t *testing.T) {
 	if count <= 0 {
-		t.Logf("Will skip waiting for %s since exected count <= 0.",
+		t.Logf("Will skip waiting for %s since expected count <= 0.",
 			expected)
 		return
 	}
@@ -504,7 +504,7 @@ func dirSize(path string) (int64, error) {
 // New keys are created with each for-loop iteration. During each
 // iteration, the previous for-loop iteration's keys are deleted.
 //
-// To reproduce continous growth problem due to `badgerMove` keys,
+// To reproduce continuous growth problem due to `badgerMove` keys,
 // update `value.go` `discardEntry` line 1628 to return false
 //
 // Also with PR #1303, the delete keys are properly cleaned which
@@ -2169,7 +2169,7 @@ func TestForceFlushMemtable(t *testing.T) {
 	ops.ValueLogMaxEntries = 1
 
 	db, err := Open(ops)
-	require.NoError(t, err, "error while openning db")
+	require.NoError(t, err, "error while opening db")
 	defer func() { require.NoError(t, db.Close()) }()
 
 	for i := 0; i < 3; i++ {
diff --git a/dir_windows.go b/dir_windows.go
index 237649eb4..e1b6b2856 100644
--- a/dir_windows.go
+++ b/dir_windows.go
@@ -74,7 +74,7 @@ func acquireDirectoryLock(dirPath string, pidFileName string, readOnly bool) (*d
 	// FILE_ATTRIBUTE_TEMPORARY is used to tell Windows to try to create the handle in memory.
 	// FILE_FLAG_DELETE_ON_CLOSE is not specified in syscall_windows.go but tells Windows to delete
 	// the file when all processes holding the handler are closed.
-	// XXX: this works but it's a bit klunky. i'd prefer to use LockFileEx but it needs unsafe pkg.
+	// XXX: this works but it's a bit clunky. i'd prefer to use LockFileEx but it needs unsafe pkg.
 	h, err := syscall.CreateFile(
 		syscall.StringToUTF16Ptr(absLockFilePath), 0, 0, nil,
 		syscall.OPEN_ALWAYS,
diff --git a/errors.go b/errors.go
index 3063fd627..2007a0c78 100644
--- a/errors.go
+++ b/errors.go
@@ -33,7 +33,7 @@ var (
 	// ErrReadOnlyTxn is returned if an update function is called on a read-only transaction.
 	ErrReadOnlyTxn = stderrors.New("No sets or deletes are allowed in a read-only transaction")
 
-	// ErrDiscardedTxn is returned if a previously discarded transaction is re-used.
+	// ErrDiscardedTxn is returned if a previously discarded transaction is reused.
 	ErrDiscardedTxn = stderrors.New("This transaction has been discarded. Create a new one")
 
 	// ErrEmptyKey is returned if an empty key is passed on an update function.
diff --git a/key_registry.go b/key_registry.go
index 028fae0d1..f01a390c4 100644
--- a/key_registry.go
+++ b/key_registry.go
@@ -246,7 +246,7 @@ func WriteKeyRegistry(reg *KeyRegistry, opt KeyRegistryOptions) error {
 		var err error
 		eSanity, err = y.XORBlockAllocate(eSanity, opt.EncryptionKey, iv)
 		if err != nil {
-			return y.Wrapf(err, "Error while encrpting sanity text in WriteKeyRegistry")
+			return y.Wrapf(err, "Error while encrypting sanity text in WriteKeyRegistry")
 		}
 	}
 	y.Check2(buf.Write(iv))
@@ -310,7 +310,7 @@ func (kr *KeyRegistry) LatestDataKey() (*pb.DataKey, error) {
 	// validKey return datakey if the last generated key duration less than
 	// rotation duration.
 	validKey := func() (*pb.DataKey, bool) {
-		// Time diffrence from the last generated time.
+		// Time difference from the last generated time.
 		diff := time.Since(time.Unix(kr.lastCreated, 0))
 		if diff < kr.opt.EncryptionKeyRotationDuration {
 			return kr.dataKeys[kr.nextKeyID], true
diff --git a/levels.go b/levels.go
index 5d20d3417..eea53d3bd 100644
--- a/levels.go
+++ b/levels.go
@@ -1573,7 +1573,7 @@ func (s *levelsController) addLevel0Table(t *table.Table) error {
 	}
 
 	for !s.levels[0].tryAddLevel0Table(t) {
-		// Before we unstall, we need to make sure that level 0 is healthy.
+		// Before we un-stall, we need to make sure that level 0 is healthy.
 		timeStart := time.Now()
 		for s.levels[0].numTables() >= s.kv.opt.NumLevelZeroTablesStall {
 			time.Sleep(10 * time.Millisecond)
diff --git a/stream_writer_test.go b/stream_writer_test.go
index ef56068ab..10a94d4d3 100644
--- a/stream_writer_test.go
+++ b/stream_writer_test.go
@@ -458,7 +458,7 @@ func TestSendOnClosedStream(t *testing.T) {
 
 	// Defer for panic.
 	defer func() {
-		require.NotNil(t, recover(), "should have paniced")
+		require.NotNil(t, recover(), "should have panicked")
 		require.NoError(t, sw.Flush())
 		require.NoError(t, db.Close())
 	}()
@@ -514,7 +514,7 @@ func TestSendOnClosedStream2(t *testing.T) {
 
 	// Defer for panic.
 	defer func() {
-		require.NotNil(t, recover(), "should have paniced")
+		require.NotNil(t, recover(), "should have panicked")
 		require.NoError(t, sw.Flush())
 		require.NoError(t, db.Close())
 	}()
diff --git a/structs.go b/structs.go
index 75aec4bc5..a50b899c0 100644
--- a/structs.go
+++ b/structs.go
@@ -65,7 +65,7 @@ const (
 	maxHeaderSize = 22
 )
 
-// Encode encodes the header into []byte. The provided []byte should be atleast 5 bytes. The
+// Encode encodes the header into []byte. The provided []byte should be at least 5 bytes. The
 // function will panic if out []byte isn't large enough to hold all the values.
 // The encoded header looks like
 // +------+----------+------------+--------------+-----------+
diff --git a/table/builder.go b/table/builder.go
index 70ebc99bd..49b05f431 100644
--- a/table/builder.go
+++ b/table/builder.go
@@ -356,7 +356,7 @@ func (b *Builder) addInternal(key []byte, value y.ValueStruct, valueLen uint32,
 
 // ReachedCapacity returns true if we... roughly (?) reached capacity?
 func (b *Builder) ReachedCapacity() bool {
-	// If encryption/compression is enabled then use the compresssed size.
+	// If encryption/compression is enabled then use the compressed size.
 	sumBlockSizes := b.compressedSize.Load()
 	if b.opts.Compression == options.None && b.opts.DataKey == nil {
 		sumBlockSizes = b.uncompressedSize.Load()
diff --git a/table/builder_test.go b/table/builder_test.go
index e410344b5..1dc7fbc4b 100644
--- a/table/builder_test.go
+++ b/table/builder_test.go
@@ -38,7 +38,7 @@ func TestTableIndex(t *testing.T) {
 		opts Options
 	}{
 		{
-			name: "No encyption/compression",
+			name: "No encryption/compression",
 			opts: Options{
 				BlockSize:          4 * 1024,
 				BloomFalsePositive: 0.01,
diff --git a/table/table.go b/table/table.go
index a32515e2d..63c428f1f 100644
--- a/table/table.go
+++ b/table/table.go
@@ -214,7 +214,7 @@ func (b *Block) decrRef() {
 		return
 	}
 
-	// Insert the []byte into pool only if the block is resuable. When a block
+	// Insert the []byte into pool only if the block is reusable. When a block
 	// is reusable a new []byte is used for decompression and this []byte can
 	// be reused.
 	// In case of an uncompressed block, the []byte is a reference to the
@@ -332,7 +332,7 @@ func OpenInMemoryTable(data []byte, id uint64, opt *Options) (*Table, error) {
 }
 
 func (t *Table) initBiggestAndSmallest() error {
-	// This defer will help gathering debugging info incase initIndex crashes.
+	// This defer will help gathering debugging info in case initIndex crashes.
 	defer func() {
 		if r := recover(); r != nil {
 			// Use defer for printing info because there may be an intermediate panic.
diff --git a/test_extensions.go b/test_extensions.go
index 0a863ea9f..afce16829 100644
--- a/test_extensions.go
+++ b/test_extensions.go
@@ -45,7 +45,7 @@ type testOnlyDBExtensions struct {
 // logToSyncChan sends a message to the DB's syncChan. Note that we expect
 // that the DB never closes this channel; the responsibility for
 // allocating and closing the channel belongs to the test module.
-// if db.syncChan is nil or has never been initialized, ths will be
+// if db.syncChan is nil or has never been initialized, this will be
 // silently ignored.
 func (db *DB) logToSyncChan(msg string) {
 	if db.syncChan != nil {
@@ -55,7 +55,7 @@ func (db *DB) logToSyncChan(msg string) {
 
 // captureDiscardStats will copy the contents of the discardStats file
 // maintained by vlog to the onCloseDiscardCapture map specified by
-// db.opt. Of couse, if db.opt.onCloseDiscardCapture is nil (as expected
+// db.opt. Of course, if db.opt.onCloseDiscardCapture is nil (as expected
 // for a production system as opposed to a test system), this is a no-op.
 func (db *DB) captureDiscardStats() {
 	if db.onCloseDiscardCapture != nil {
diff --git a/value.go b/value.go
index e30a80cfb..ae9f754a6 100644
--- a/value.go
+++ b/value.go
@@ -252,7 +252,7 @@ func (vlog *valueLog) rewrite(f *logFile) error {
 		//
 		// NOTE: moveKeyi is the gc'ed version of the original key with version i
 		// We're calling the gc'ed keys as moveKey to simplify the
-		// explanantion. We used to add move keys but we no longer do that.
+		// explanation. We used to add move keys but we no longer do that.
 		//
 		// Assume we have 3 move keys in L0.
 		// - moveKey1 (points to vlog file 10),
diff --git a/value_test.go b/value_test.go
index 7999cc968..c9545c9af 100644
--- a/value_test.go
+++ b/value_test.go
@@ -1050,7 +1050,7 @@ func TestValueLogTruncate(t *testing.T) {
 	fileCountAfterCorruption := len(db.Tables()) + len(db.imm) + 1 // +1 for db.mt
 	// We should have one memtable and one sst file.
 	require.Equal(t, fileCountBeforeCorruption+1, fileCountAfterCorruption)
-	// maxFid will be 2 because we increment the max fid on DB open everytime.
+	// maxFid will be 2 because we increment the max fid on DB open every time.
 	require.Equal(t, 2, int(db.vlog.maxFid))
 	require.NoError(t, db.Close())
 }
diff --git a/y/watermark.go b/y/watermark.go
index 5479c9cbf..5f9d0a59c 100644
--- a/y/watermark.go
+++ b/y/watermark.go
@@ -119,7 +119,7 @@ func (w *WaterMark) WaitForMark(ctx context.Context, index uint64) error {
 // process is used to process the Mark channel. This is not thread-safe,
 // so only run one goroutine for process. One is sufficient, because
 // all goroutine ops use purely memory and cpu.
-// Each index has to emit atleast one begin watermark in serial order otherwise waiters
+// Each index has to emit at least one begin watermark in serial order otherwise waiters
 // can get blocked idefinitely. Example: We had an watermark at 100 and a waiter at 101,
 // if no watermark is emitted at index 101 then waiter would get stuck indefinitely as it
 // can't decide whether the task at 101 has decided not to emit watermark or it didn't get