@@ -1329,7 +1329,26 @@ static void aclnn_pow_tensor_tensor(ggml_backend_cann_context& ctx,
1329
1329
GGML_CANN_CALL_ACLNN_OP (ctx, InplacePowTensorTensor, acl_dst, acl_exp);
1330
1330
}
1331
1331
1332
-
1332
+ /**
1333
+ * @brief Generate a range of values and apply a scalar base exponentiation.
1334
+ *
1335
+ * This function creates an evenly spaced sequence from `start` to `stop` (exclusive),
1336
+ * with step size `step`, stores it in a temporary buffer, and then computes:
1337
+ *
1338
+ * @f[
1339
+ * slope[i] = m^{\left( start + i \cdot step \right)}, \quad 0 \le i < size
1340
+ * @f]
1341
+ *
1342
+ * The results are written to the provided @p slope_buffer.
1343
+ *
1344
+ * @param ctx CANN backend context for memory allocation and operator execution.
1345
+ * @param slope_buffer Pointer to the output buffer (float array) for the computed slope values.
1346
+ * @param m Scalar base for the exponentiation.
1347
+ * @param size Number of elements in the generated sequence.
1348
+ * @param start Starting exponent offset.
1349
+ * @param stop Stopping exponent offset (exclusive).
1350
+ * @param step Step size for the exponent increment.
1351
+ */
1333
1352
static void aclnn_get_slope_inner (ggml_backend_cann_context& ctx, void * slope_buffer,
1334
1353
float m, int64_t size, float start, float stop, float step){
1335
1354
int64_t ne[] = {size};
@@ -1351,6 +1370,31 @@ static void aclnn_get_slope_inner(ggml_backend_cann_context& ctx, void* slope_bu
1351
1370
ggml_cann_release_resources (ctx, sc, arange_tensor, slope_tensor);
1352
1371
}
1353
1372
1373
+ /**
1374
+ * @brief Compute slope values for multiple attention heads based on ALiBi bias parameters.
1375
+ *
1376
+ * This function generates slope values for each attention head according to the ALiBi
1377
+ * (Attention with Linear Biases) method. It splits the computation into two ranges depending
1378
+ * on whether the head index is less than @p n_head_log2 or not, and uses different base values
1379
+ * (`m0` and `m1`) for the exponentiation.
1380
+ *
1381
+ * @f[
1382
+ * slope[h] =
1383
+ * \begin{cases}
1384
+ * m_0^{(h + 1)}, & h < n\_head\_log2 \\
1385
+ * m_1^{\left( 2 \cdot (h - n\_head\_log2) + 1 \right)}, & h \geq n\_head\_log2
1386
+ * \end{cases}
1387
+ * \quad , \quad \text{if } max\_bias > 0
1388
+ * @f]
1389
+ *
1390
+ * If @p max_bias <= 0, all slope values are set to 1.0.
1391
+ *
1392
+ * @param ctx CANN backend context for memory allocation and operator execution.
1393
+ * @param n_head Total number of attention heads.
1394
+ * @param slope_buffer Pointer to the output buffer (float array) for storing slopes.
1395
+ * @param max_bias Maximum bias value for slope computation.
1396
+ *
1397
+ */
1354
1398
static void aclnn_get_slope (ggml_backend_cann_context & ctx, int64_t n_head,
1355
1399
void * slope_buffer, float max_bias) {
1356
1400
const int n_head_log2 = 1u << (uint32_t ) floor (log2 (n_head));
@@ -1382,6 +1426,27 @@ static void aclnn_get_slope(ggml_backend_cann_context & ctx, int64_t n_head,
1382
1426
}
1383
1427
}
1384
1428
1429
+ /**
1430
+ * @brief Add ALiBi (Attention with Linear Biases) positional biases to the attention mask.
1431
+ *
1432
+ * This function computes the ALiBi slopes for each attention head (if max_bias > 0),
1433
+ * multiplies them with the attention mask to produce bias tensors, and adds these biases
1434
+ * to the destination tensor (@p dst).
1435
+ *
1436
+ * The function performs necessary broadcasting of the mask and slope tensors to match
1437
+ * the shape of the destination tensor, then applies element-wise multiplication and addition
1438
+ * using CANN operators.
1439
+ *
1440
+ * @param ctx CANN backend context for memory management and operator execution.
1441
+ * @param mask Input attention mask tensor, assumed to be contiguous.
1442
+ * @param dst Destination tensor to which ALiBi biases will be added.
1443
+ * @param dst_ptr Pointer to the memory of the destination tensor.
1444
+ * @param max_bias Maximum bias value controlling the slope scaling.
1445
+ *
1446
+ * @note
1447
+ * - Write data into dst_ptr using only the shape information of the dst tensor.
1448
+ * - `GGML_MAX_DIMS + 2` is used to extend tensor dimensions for broadcasting.
1449
+ */
1385
1450
static void aclnn_add_alibi (ggml_backend_cann_context& ctx, ggml_tensor* mask,
1386
1451
ggml_tensor* dst, void * dst_ptr, float max_bias) {
1387
1452
void * slope_buffer = nullptr ;
@@ -1399,7 +1464,6 @@ static void aclnn_add_alibi(ggml_backend_cann_context& ctx, ggml_tensor* mask,
1399
1464
}
1400
1465
1401
1466
// broadcast for mask, slope and dst;
1402
- GGML_ASSERT (ggml_is_contiguous (mask));
1403
1467
int64_t nr2 = dst->ne [2 ] / mask->ne [2 ];
1404
1468
int64_t nr3 = dst->ne [3 ] / mask->ne [3 ];
1405
1469
@@ -1429,6 +1493,8 @@ static void aclnn_add_alibi(ggml_backend_cann_context& ctx, ggml_tensor* mask,
1429
1493
slope_ne, slope_nb, GGML_MAX_DIMS + 2 );
1430
1494
aclTensor * acl_mask = ggml_cann_create_tensor (
1431
1495
mask, mask_ne, mask_nb, GGML_MAX_DIMS + 2 );
1496
+
1497
+ // write data into dst_ptr using only the shape information of the dst tensor.
1432
1498
aclTensor * acl_dst = ggml_cann_create_tensor (
1433
1499
dst_ptr, ggml_cann_type_mapping (dst->type ),
1434
1500
ggml_type_size (dst->type ), dst_ne, dst_nb,
0 commit comments