Skip to content

Commit 8b2e280

Browse files
andy-kimball authored and manishrjain committed
Enable skiplist values that are up to 32 bits in length (#472)
Currently, inline skiplist values are limited to a length of 16 bits. This commit increases that limit to 32 bits. It does this by making node.value a uint64, and then packing the value offset and size into it, as two uint32 values. Because node.value is accessed with atomic.LoadUint64, it must be aligned on a 64-bit boundary. This is guaranteed by Arena.putNode, which is changed to always align on a 64-bit boundary rather than on a pointer boundary (which would incorrectly align on 32-bit boundary on a 32-bit machine).
1 parent 5ff6b6f commit 8b2e280

File tree

3 files changed

+22
-25
lines changed

3 files changed

+22
-25
lines changed

skl/arena.go

Lines changed: 10 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,12 @@ import (
2525

2626
const (
2727
offsetSize = int(unsafe.Sizeof(uint32(0)))
28-
ptrAlign = int(unsafe.Sizeof(uintptr(0))) - 1
28+
29+
// Always align nodes on 64-bit boundaries, even on 32-bit architectures,
30+
// so that the node.value field is 64-bit aligned. This is necessary because
31+
// node.getValueOffset uses atomic.LoadUint64, which expects its input
32+
// pointer to be 64-bit aligned.
33+
nodeAlign = int(unsafe.Sizeof(uint64(0))) - 1
2934
)
3035

3136
// Arena should be lock-free.
@@ -61,14 +66,14 @@ func (s *Arena) putNode(height int) uint32 {
6166
unusedSize := (maxHeight - height) * offsetSize
6267

6368
// Pad the allocation with enough bytes to ensure pointer alignment.
64-
l := uint32(MaxNodeSize - unusedSize + ptrAlign)
69+
l := uint32(MaxNodeSize - unusedSize + nodeAlign)
6570
n := atomic.AddUint32(&s.n, l)
6671
y.AssertTruef(int(n) <= len(s.buf),
6772
"Arena too small, toWrite:%d newTotal:%d limit:%d",
6873
l, n, len(s.buf))
6974

7075
// Return the aligned offset.
71-
m := (n - l + uint32(ptrAlign)) & ^uint32(ptrAlign)
76+
m := (n - l + uint32(nodeAlign)) & ^uint32(nodeAlign)
7277
return m
7378
}
7479

@@ -115,8 +120,8 @@ func (s *Arena) getKey(offset uint32, size uint16) []byte {
115120

116121
// getVal returns byte slice at offset. The given size should be just the value
117122
// size and should NOT include the meta bytes.
118-
func (s *Arena) getVal(offset uint32, size uint16) (ret y.ValueStruct) {
119-
ret.Decode(s.buf[offset : offset+uint32(size)])
123+
func (s *Arena) getVal(offset uint32, size uint32) (ret y.ValueStruct) {
124+
ret.Decode(s.buf[offset : offset+size])
120125
return
121126
}
122127

skl/skl.go

Lines changed: 9 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -53,9 +53,8 @@ type node struct {
5353
// Multiple parts of the value are encoded as a single uint64 so that it
5454
// can be atomically loaded and stored:
5555
// value offset: uint32 (bits 0-31)
56-
// value size : uint16 (bits 32-47)
57-
// 12 bytes are allocated to ensure 8 byte alignment also on 32bit systems.
58-
value [12]byte
56+
// value size : uint32 (bits 32-63)
57+
value uint64
5958

6059
// A byte slice is 24 bytes. We are trying to save space here.
6160
keyOffset uint32 // Immutable. No need to lock to access key.
@@ -109,17 +108,17 @@ func newNode(arena *Arena, key []byte, v y.ValueStruct, height int) *node {
109108
node.keyOffset = arena.putKey(key)
110109
node.keySize = uint16(len(key))
111110
node.height = uint16(height)
112-
*node.value64BitAlignedPtr() = encodeValue(arena.putVal(v), v.EncodedSize())
111+
node.value = encodeValue(arena.putVal(v), v.EncodedSize())
113112
return node
114113
}
115114

116-
func encodeValue(valOffset uint32, valSize uint16) uint64 {
115+
func encodeValue(valOffset uint32, valSize uint32) uint64 {
117116
return uint64(valSize)<<32 | uint64(valOffset)
118117
}
119118

120-
func decodeValue(value uint64) (valOffset uint32, valSize uint16) {
119+
func decodeValue(value uint64) (valOffset uint32, valSize uint32) {
121120
valOffset = uint32(value)
122-
valSize = uint16(value >> 32)
121+
valSize = uint32(value >> 32)
123122
return
124123
}
125124

@@ -135,15 +134,8 @@ func NewSkiplist(arenaSize int64) *Skiplist {
135134
}
136135
}
137136

138-
func (s *node) value64BitAlignedPtr() *uint64 {
139-
if uintptr(unsafe.Pointer(&s.value))%8 == 0 {
140-
return (*uint64)(unsafe.Pointer(&s.value))
141-
}
142-
return (*uint64)(unsafe.Pointer(&s.value[4]))
143-
}
144-
145-
func (s *node) getValueOffset() (uint32, uint16) {
146-
value := atomic.LoadUint64(s.value64BitAlignedPtr())
137+
func (s *node) getValueOffset() (uint32, uint32) {
138+
value := atomic.LoadUint64(&s.value)
147139
return decodeValue(value)
148140
}
149141

@@ -154,7 +146,7 @@ func (s *node) key(arena *Arena) []byte {
154146
func (s *node) setValue(arena *Arena, v y.ValueStruct) {
155147
valOffset := arena.putVal(v)
156148
value := encodeValue(valOffset, v.EncodedSize())
157-
atomic.StoreUint64(s.value64BitAlignedPtr(), value)
149+
atomic.StoreUint64(&s.value, value)
158150
}
159151

160152
func (s *node) getNextOffset(h int) uint32 {

y/iterator.go

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -47,14 +47,14 @@ func sizeVarint(x uint64) (n int) {
4747
}
4848

4949
// EncodedSize is the size of the ValueStruct when encoded
50-
func (v *ValueStruct) EncodedSize() uint16 {
50+
func (v *ValueStruct) EncodedSize() uint32 {
5151
sz := len(v.Value) + 2 // meta, usermeta.
5252
if v.ExpiresAt == 0 {
53-
return uint16(sz + 1)
53+
return uint32(sz + 1)
5454
}
5555

5656
enc := sizeVarint(v.ExpiresAt)
57-
return uint16(sz + enc)
57+
return uint32(sz + enc)
5858
}
5959

6060
// Decode uses the length of the slice to infer the length of the Value field.

0 commit comments

Comments (0)