@@ -78,7 +78,7 @@ inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
   __asm__ __volatile__(".set push\n"
                        ".set noreorder\n"
                        "1:\n"
-                       "ll %1, %2\n"  // old = *ptr
+                       "ll %1, %4\n"  // old = *ptr
                        "move %0, %3\n"  // temp = new_value
                        "sc %0, %2\n"  // *ptr = temp (with atomic check)
                        "beqz %0, 1b\n"  // start again on atomic error
@@ -100,7 +100,7 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
   __asm__ __volatile__(".set push\n"
                        ".set noreorder\n"
                        "1:\n"
-                       "ll %0, %2\n"  // temp = *ptr
+                       "ll %0, %4\n"  // temp = *ptr
                        "addu %1, %0, %3\n"  // temp2 = temp + increment
                        "sc %1, %2\n"  // *ptr = temp2 (with atomic check)
                        "beqz %1, 1b\n"  // start again on atomic error
@@ -178,6 +178,132 @@ inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
   return *ptr;
 }
 
+#if defined(__LP64__)
+// 64-bit versions of the atomic ops.
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  Atomic64 prev, tmp;
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
+                       "lld %0, %5\n"  // prev = *ptr
+                       "bne %0, %3, 2f\n"  // if (prev != old_value) goto 2
+                       "move %2, %4\n"  // tmp = new_value
+                       "scd %2, %1\n"  // *ptr = tmp (with atomic check)
+                       "beqz %2, 1b\n"  // start again on atomic error
+                       "nop\n"  // delay slot nop
+                       "2:\n"
+                       ".set pop\n"
+                       : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
+                       : "Ir" (old_value), "r" (new_value), "m" (*ptr)
+                       : "memory");
+  return prev;
+}
+
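The lld/scd pair above is the usual load-linked/store-conditional retry loop. As a hedged illustration of how callers typically drive the compare-and-swap primitive (AtomicMax64 is an invented helper using the NoBarrier_Load added further down, not part of this patch):

    // Illustrative only: keep *ptr at the maximum of its current value and
    // candidate, retrying whenever another thread wins the CAS race.
    inline Atomic64 AtomicMax64(volatile Atomic64* ptr, Atomic64 candidate) {
      Atomic64 observed = NoBarrier_Load(ptr);
      while (observed < candidate) {
        // CAS returns the value it actually found in *ptr.
        Atomic64 prev = NoBarrier_CompareAndSwap(ptr, observed, candidate);
        if (prev == observed) return candidate;  // our value was installed
        observed = prev;                         // lost the race; re-evaluate
      }
      return observed;  // already at least as large as candidate
    }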
+// Atomically store new_value into *ptr, returning the previous value held in
+// *ptr. This routine implies no memory barriers.
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+                                         Atomic64 new_value) {
+  Atomic64 temp, old;
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
+                       "lld %1, %4\n"  // old = *ptr
+                       "move %0, %3\n"  // temp = new_value
+                       "scd %0, %2\n"  // *ptr = temp (with atomic check)
+                       "beqz %0, 1b\n"  // start again on atomic error
+                       "nop\n"  // delay slot nop
+                       ".set pop\n"
+                       : "=&r" (temp), "=&r" (old), "=m" (*ptr)
+                       : "r" (new_value), "m" (*ptr)
+                       : "memory");
+
+  return old;
+}
+
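As a usage note (again illustrative, not part of the patch), the exchange primitive plus MemoryBarrier() is the classic test-and-set building block, here a minimal spin lock over a 64-bit lock word:

    // Illustrative only: lock_word is 0 when free, 1 when held.
    inline void SpinLockAcquire(volatile Atomic64* lock_word) {
      while (NoBarrier_AtomicExchange(lock_word, 1) != 0) {
        // Spin until the previous holder stores 0 back.
      }
      MemoryBarrier();  // acquire: critical-section accesses stay after the exchange
    }

    inline void SpinLockRelease(volatile Atomic64* lock_word) {
      MemoryBarrier();  // release: critical-section accesses stay before the store
      NoBarrier_Store(lock_word, 0);
    }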
+// Atomically increment *ptr by "increment". Returns the new value of
+// *ptr with the increment applied. This routine implies no memory barriers.
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+                                          Atomic64 increment) {
+  Atomic64 temp, temp2;
+
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
+                       "lld %0, %4\n"  // temp = *ptr
+                       "daddu %1, %0, %3\n"  // temp2 = temp + increment
+                       "scd %1, %2\n"  // *ptr = temp2 (with atomic check)
+                       "beqz %1, 1b\n"  // start again on atomic error
+                       "daddu %1, %0, %3\n"  // temp2 = temp + increment (branch delay slot)
+                       ".set pop\n"
+                       : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
+                       : "Ir" (increment), "m" (*ptr)
+                       : "memory");
+  // temp2 now holds the final value.
+  return temp2;
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+                                        Atomic64 increment) {
+  MemoryBarrier();
+  Atomic64 res = NoBarrier_AtomicIncrement(ptr, increment);
+  MemoryBarrier();
+  return res;
+}
+
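A hedged example of the intended division of labor between the two increment variants (Ref/Unref are invented names): a reference count where only the decrement that may free the object needs the surrounding barriers:

    // Illustrative only.
    inline void Ref(volatile Atomic64* refcount) {
      NoBarrier_AtomicIncrement(refcount, 1);  // no ordering needed to add a reference
    }

    inline bool Unref(volatile Atomic64* refcount) {
      // Barrier variant: writes made while holding the reference become
      // visible to the thread that sees the count reach zero.
      return Barrier_AtomicIncrement(refcount, -1) == 0;  // true => last reference
    }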
+// "Acquire" operations
+// ensure that no later memory access can be reordered ahead of the operation.
+// "Release" operations ensure that no previous memory access can be reordered
+// after the operation. "Barrier" operations have both "Acquire" and "Release"
+// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
+// access.
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  MemoryBarrier();
+  return res;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  MemoryBarrier();
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+  return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+  Atomic64 value = *ptr;
+  MemoryBarrier();
+  return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+#endif  // defined(__LP64__)
+
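To make the Acquire/Release comment concrete, a hedged producer/consumer sketch built on the 64-bit store/load pairs (payload and ready_flag are illustrative globals, not part of this file):

    // Illustrative only.
    volatile Atomic64 payload = 0;
    volatile Atomic64 ready_flag = 0;

    void Producer() {
      NoBarrier_Store(&payload, 42);  // write the data...
      Release_Store(&ready_flag, 1);  // ...then barrier + store the flag
    }

    Atomic64 Consumer() {
      while (Acquire_Load(&ready_flag) == 0) {
        // Spin. Acquire_Load is load-then-barrier, so the payload read
        // below cannot be reordered ahead of observing the flag.
      }
      return NoBarrier_Load(&payload);  // guaranteed to see 42
    }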
 }  // namespace internal
 }  // namespace protobuf
 }  // namespace google