@@ -541,6 +541,7 @@ namespace sparrow
541
541
542
542
// Efficient bit manipulation helpers for insert operations
543
543
constexpr void shift_bits_right (size_type start_pos, size_type bit_count, size_type shift_amount);
544
+ constexpr void shift_bits_left (size_type start_pos, size_type bit_count, size_type shift_amount);
544
545
constexpr void fill_bits_range (size_type start_pos, size_type bit_count, value_type value);
545
546
template <std::random_access_iterator InputIt>
546
547
constexpr iterator
@@ -1070,10 +1071,9 @@ namespace sparrow
1070
1071
return end ();
1071
1072
}
1072
1073
1073
- // TODO: The current implementation is not efficient. It can be improved.
1074
-
1075
- const size_type bit_to_move = size () - last_index;
1076
- for (size_t i = 0 ; i < bit_to_move; ++i)
1074
+ // Shift the trailing bits down one bit at a time; ascending order is safe
+ // because each destination index is strictly below its source index.
1075
+ const size_type bits_to_move = size () - last_index;
1076
+ for (size_type i = 0 ; i < bits_to_move; ++i)
1077
1077
{
1078
1078
set (first_index + i, test (last_index + i));
1079
1079
}
@@ -1168,6 +1168,74 @@ namespace sparrow
1168
1168
}
1169
1169
}
1170
1170
1171
+ template <typename B>
1172
+ requires std::ranges::random_access_range<std::remove_pointer_t <B>>
1173
+ constexpr void
1174
+ dynamic_bitset_base<B>::shift_bits_left(size_type start_pos, size_type bit_count, size_type shift_amount)
1175
+ {
1176
+ if (bit_count == 0 || shift_amount == 0 || data () == nullptr )
1177
+ {
1178
+ return ;
1179
+ }
1180
+
1181
+ const size_type end_pos = start_pos + bit_count;
1182
+
1183
+ // Calculate block boundaries
1184
+ const size_type start_block = block_index (start_pos);
1185
+ const size_type end_block = block_index (end_pos - 1 );
1186
+ const size_type target_start_block = block_index (start_pos - shift_amount);
1187
+ const size_type target_end_block = block_index (end_pos - shift_amount - 1 );
1188
+
1189
+ // If the shift spans multiple blocks, use block-level operations
1190
+ if (shift_amount >= s_bits_per_block && start_block != end_block)
1191
+ {
1192
+ const size_type block_shift = shift_amount / s_bits_per_block;
1193
+ const size_type bit_shift = shift_amount % s_bits_per_block;
1194
+
1195
+ // Move whole blocks first (left shift means earlier indices)
1196
+ for (size_type i = start_block; i <= end_block; ++i)
1197
+ {
1198
+ const size_type target_block = i - block_shift;
1199
+ if (target_block < buffer ().size () && i >= block_shift)
1200
+ {
1201
+ buffer ().data ()[target_block] = buffer ().data ()[i];
1202
+ }
1203
+ }
1204
+
1205
+ // Handle remaining bit shift within blocks
1206
+ if (bit_shift > 0 )
1207
+ {
1208
+ for (size_type i = target_start_block; i < target_end_block; ++i)
1209
+ {
1210
+ if (i < buffer ().size ())
1211
+ {
1212
+ const block_type current = buffer ().data ()[i];
1213
+ const block_type next = (i + 1 < buffer ().size ()) ? buffer ().data ()[i + 1 ] : block_type (0 );
1214
+ buffer ().data ()[i] = static_cast <block_type>(
1215
+ (current >> bit_shift) | (next << (s_bits_per_block - bit_shift))
1216
+ );
1217
+ }
1218
+ }
1219
+ if (target_end_block < buffer ().size ())
1220
+ {
1221
+ buffer ().data ()[target_end_block] = static_cast <block_type>(
1222
+ buffer ().data ()[target_end_block] >> bit_shift
1223
+ );
1224
+ }
1225
+ }
1226
+ }
1227
+ else
1228
+ {
1229
+ // For smaller shifts, use bit-level operations optimized for the shift amount
1230
+ for (size_type i = 0 ; i < bit_count; ++i)
1231
+ {
1232
+ const size_type src_pos = start_pos + i;
1233
+ const size_type dst_pos = src_pos - shift_amount;
1234
+ set (dst_pos, test (src_pos));
1235
+ }
1236
+ }
1237
+ }
1238
+
1171
1239
template <typename B>
1172
1240
requires std::ranges::random_access_range<std::remove_pointer_t <B>>
1173
1241
constexpr void
0 commit comments