# aclnnFusedFloydAttentionGrad (CANN Floyd Attention Gradient Operator)

Part of [ops-transformer](https://gitcode.com/cann/ops-transformer), the CANN library of transformer operators for accelerating large-model networks on NPU.

## Product Support

| Product | Supported |
| --- | --- |
| Ascend 950PR / Ascend 950DT | × |
| Atlas A3 Training Series / Atlas A3 Inference Series | √ |
| Atlas A2 Training Series / Atlas A2 Inference Series | √ |
| Atlas 200I/500 A2 Inference Products | × |
| Atlas Inference Series | × |
| Atlas Training Series | × |

## Function Description

Interface function: computes the backward (gradient) outputs of Floyd attention in training scenarios. Compared with conventional FA (FlashAttention), FloydAttn additionally treats the sequence dimension as a batch axis when computing the QK/PV attention products, which turns them into batched matmuls (batchMatmul).
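As a shape-level illustration of this seq-as-batch view (a reading inferred from the tensor shapes documented in the parameter table below, not a normative layout specification), the first score term batches its matmul over the leading (B, H, N) axes:

$$ S_1[b,h,n] = Q[b,h,n]\,K_1[b,h,n]^T \in \mathbb{R}^{M \times K}, \qquad Q \in \mathbb{R}^{B \times H \times N \times M \times D},\; K_1 \in \mathbb{R}^{B \times H \times N \times K \times D} $$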
### Computation Formulas

Given the forward computation of the attention:

$$ S = Mask(scale*(QK_1^T + QK_2^T),\ atten\_mask) $$

$$ P = Softmax(S) $$

$$ Y = PV_1 + PV_2 $$

the backward computation of the attention is:

$$ dV_1 = P^T dY $$

$$ dV_2 = P^T dY $$

$$ dQ = \frac{(dS)*K_1}{\sqrt{d}} + \frac{(dS)*K_2}{\sqrt{d}} $$

$$ dK_1 = \frac{(dS)^T*Q}{\sqrt{d}} $$

$$ dK_2 = \frac{(dS)^T*Q}{\sqrt{d}} $$
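The backward formulas use dS without defining it. For reference (this is the standard softmax backward written in the same schematic notation as above; it is not spelled out in the original document), dS can be recovered from dY as:

$$ dP = dY*V_1^T + dY*V_2^T $$

$$ dS_{ij} = P_{ij}\left(dP_{ij} - \sum_k P_{ik}\,dP_{ik}\right) $$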
## Function Prototype

Each operator uses a two-stage interface: first call "aclnnFusedFloydAttentionGradGetWorkspaceSize" to obtain the workspace size required for the computation and an executor that holds the operator's computation flow, then call "aclnnFusedFloydAttentionGrad" to execute the computation.

```cpp
aclnnStatus aclnnFusedFloydAttentionGradGetWorkspaceSize(
    const aclTensor *query, const aclTensor *key1, const aclTensor *value1,
    const aclTensor *key2, const aclTensor *value2, const aclTensor *dy,
    const aclTensor *attenMaskOptional, const aclTensor *softmaxMax,
    const aclTensor *softmaxSum, const aclTensor *attentionIn,
    double scaleValue,
    const aclTensor *dqOut, const aclTensor *dk1Out, const aclTensor *dv1Out,
    const aclTensor *dk2Out, const aclTensor *dv2Out,
    uint64_t *workspaceSize, aclOpExecutor **executor)

aclnnStatus aclnnFusedFloydAttentionGrad(
    void *workspace, uint64_t workspaceSize, aclOpExecutor *executor,
    aclrtStream stream)
```

## aclnnFusedFloydAttentionGradGetWorkspaceSize

### Parameters

| Name | In/Out | Description | Data Type | Format | Shape | Non-contiguous Tensor |
| --- | --- | --- | --- | --- | --- | --- |
| query | input | Q in the formulas. Data type must match key1/value1/key2/value2. | FLOAT16, BFLOAT16 | ND | [B,H,N,M,D] | √ |
| key1 | input | K1 in the formulas. Data type must match query/value1/key2/value2. | FLOAT16, BFLOAT16 | ND | [B,H,N,K,D] | √ |
| value1 | input | V1 in the formulas. Data type must match query/key1/key2/value2. | FLOAT16, BFLOAT16 | ND | [B,H,N,K,D] | √ |
| key2 | input | K2 in the formulas. Data type must match query/key1/value1/value2. | FLOAT16, BFLOAT16 | ND | [B,H,K,M,D] | √ |
| value2 | input | V2 in the formulas. Data type must match query/key1/value1/key2. | FLOAT16, BFLOAT16 | ND | [B,H,K,M,D] | √ |
| dy | input | The input dY in the formulas. | FLOAT16, BFLOAT16 | ND | [B,H,N,M,D] | √ |
| attenMaskOptional | input | atten_mask in the formulas. A value of 1 means the position is excluded from the computation; 0 means it participates. | BOOL, UINT8 | ND | [B,1,N,1,K] | √ |
| softmaxMax | input | Intermediate output of the attention forward computation, with shape [B,H,N,M,8]. | FLOAT | ND | [B,H,N,M,8] | √ |
| softmaxSum | input | Intermediate output of the attention forward computation, with shape [B,H,N,M,8]. | FLOAT | ND | [B,H,N,M,8] | √ |
| attentionIn | input | Final output of the attention forward computation. Data type and shape match query. | FLOAT16, BFLOAT16 | ND | [B,H,N,M,D] | √ |
| dqOut | output | dQ in the formulas; the gradient of query. | FLOAT16, BFLOAT16 | ND | [B,H,N,M,D] | √ |
| dk1Out | output | dK1 in the formulas; the gradient of key1. | FLOAT16, BFLOAT16 | ND | [B,H,N,K,D] | √ |
| dv1Out | output | dV1 in the formulas; the gradient of value1. | FLOAT16, BFLOAT16 | ND | [B,H,N,K,D] | √ |
| dk2Out | output | dK2 in the formulas; the gradient of key2. | FLOAT16, BFLOAT16 | ND | [B,H,K,M,D] | √ |
| dv2Out | output | dV2 in the formulas; the gradient of value2. | FLOAT16, BFLOAT16 | ND | [B,H,K,M,D] | √ |
| scaleValue | input | scale in the formulas; the scaling factor. | DOUBLE | - | - | - |
| workspaceSize | output | Returns the workspace size to be allocated on the device. | - | - | - | - |
| executor | output | Returns the op executor holding the operator's computation flow. | - | - | - | - |

### Return Value

Returns an aclnnStatus code; see the aclnn return codes for details.

The first-stage interface validates its inputs and reports errors in the following cases:

| Return Code | Error Code | Description |
| --- | --- | --- |
| ACLNN_ERR_PARAM_NULLPTR | 161001 | A required input, output, or required attribute is a null pointer. |
| ACLNN_ERR_PARAM_INVALID | 161002 | The data type or data format of query, key1, value1, key2, value2, dy, attenMaskOptional, softmaxMax, softmaxSum, attentionIn, dqOut, dk1Out, dv1Out, dk2Out, or dv2Out is outside the supported range. |
| ACLNN_ERR_INNER_TILING_ERROR | 561002 | Tiling failed: query, key1, value1, key2, value2, dy, attenMaskOptional, softmaxMax, softmaxSum, or attentionIn violates the constraints below. |

## aclnnFusedFloydAttentionGrad

### Parameters

| Name | In/Out | Description |
| --- | --- | --- |
| workspace | input | Address of the workspace memory allocated on the device. |
| workspaceSize | input | Size of the device-side workspace, obtained from the first-stage call aclnnFusedFloydAttentionGradGetWorkspaceSize. |
| executor | input | The op executor holding the operator's computation flow. |
| stream | input | The stream on which the task executes. |

### Return Value

Returns an aclnnStatus code; see the aclnn return codes for details.

## Constraints

- This interface does not support deterministic computation.
- When used together with PyTorch, make sure the CANN package versions match the PyTorch package versions.
- Data shape constraints (a host-side pre-check sketch follows this list), where:
  - B ranges from 1 to 2K.
  - H ranges from 1 to 256.
  - N ranges from 16 to 1M, with N % 16 == 0.
  - M ranges from 128 to 1M, with M % 128 == 0.
  - K ranges from 128 to 1M, with K % 128 == 0.
  - D only supports 32, 64, or 128.
  - Axes 0/2/4 of query and key1 must match.
  - key1 and value1 must have the same shape.
  - key2 and value2 must have the same shape.
  - query must have the same shape as dy and attentionIn.
  - softmaxMax and softmaxSum must have the same shape.
- Due to low-level instruction limits, performance degrades noticeably when M*D exceeds 65536 or K*D exceeds 65536; in that case it is recommended to replace this operator with a composition of smaller operators.
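The constraints above lend themselves to a host-side pre-check before the first-stage call. The following is a minimal sketch under stated assumptions: `CheckFloydGradShapes` is a hypothetical helper, not part of the library, and "2K"/"1M" are interpreted here as 2048 and 2^20.

```cpp
#include <cstdint>

// Hypothetical helper (not part of the aclnn API): pre-checks the documented
// dimension constraints of aclnnFusedFloydAttentionGrad on the host side.
// Assumes "2K" means 2048 and "1M" means 2^20.
bool CheckFloydGradShapes(int64_t B, int64_t H, int64_t N,
                          int64_t M, int64_t K, int64_t D) {
    const int64_t kMax = 1024 * 1024;                        // documented 1M upper bound
    if (B < 1 || B > 2048) return false;                     // B: 1 ~ 2K
    if (H < 1 || H > 256) return false;                      // H: 1 ~ 256
    if (N < 16 || N > kMax || N % 16 != 0) return false;     // N: 16 ~ 1M, N % 16 == 0
    if (M < 128 || M > kMax || M % 128 != 0) return false;   // M: 128 ~ 1M, M % 128 == 0
    if (K < 128 || K > kMax || K % 128 != 0) return false;   // K: 128 ~ 1M, K % 128 == 0
    if (D != 32 && D != 64 && D != 128) return false;        // D: 32/64/128 only
    // Note: M*D or K*D above 65536 is not a hard error in the docs, but is
    // flagged as a case with noticeable performance degradation.
    return true;
}
```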
## Invocation Example

The sample code below is for reference only; for the concrete build and execution procedure, see "Compiling and Running a Sample". The listing starts with shared helpers; the main function follows.

```cpp
#include <iostream>
#include <vector>
#include <cstdint>
#include <cmath>
#include "acl/acl.h"
#include "aclnnop/aclnn_fused_floyd_attention_grad.h"

#define CHECK_RET(cond, return_expr) \
    do {                             \
        if (!(cond)) {               \
            return_expr;             \
        }                            \
    } while (0)

#define LOG_PRINT(message, ...)         \
    do {                                \
        printf(message, ##__VA_ARGS__); \
    } while (0)

int64_t GetShapeSize(const std::vector<int64_t>& shape) {
    int64_t shapeSize = 1;
    for (auto i : shape) {
        shapeSize *= i;
    }
    return shapeSize;
}

void PrintOutResult(std::vector<int64_t>& shape, void** deviceAddr) {
    auto size = GetShapeSize(shape);
    std::vector<float> resultData(size, 0);
    auto ret = aclrtMemcpy(resultData.data(), resultData.size() * sizeof(resultData[0]),
                           *deviceAddr, size * sizeof(resultData[0]),
                           ACL_MEMCPY_DEVICE_TO_HOST);
    CHECK_RET(ret == ACL_SUCCESS,
              LOG_PRINT("copy result from device to host failed. ERROR: %d\n", ret); return);
    for (int64_t i = 0; i < size; i++) {
        LOG_PRINT("mean result[%ld] is: %f\n", i, resultData[i]);
    }
}

int Init(int32_t deviceId, aclrtStream* stream) {
    // Fixed boilerplate: AscendCL initialization
    auto ret = aclInit(nullptr);
    CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclInit failed. ERROR: %d\n", ret); return ret);
    ret = aclrtSetDevice(deviceId);
    CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtSetDevice failed. ERROR: %d\n", ret); return ret);
    ret = aclrtCreateStream(stream);
    CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtCreateStream failed. ERROR: %d\n", ret); return ret);
    return 0;
}

template <typename T>
int CreateAclTensor(const std::vector<T>& hostData, const std::vector<int64_t>& shape,
                    void** deviceAddr, aclDataType dataType, aclTensor** tensor) {
    auto size = GetShapeSize(shape) * sizeof(T);
    // Allocate device-side memory with aclrtMalloc
    auto ret = aclrtMalloc(deviceAddr, size, ACL_MEM_MALLOC_HUGE_FIRST);
    CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtMalloc failed. ERROR: %d\n", ret); return ret);
    // Copy host-side data to device-side memory with aclrtMemcpy
    ret = aclrtMemcpy(*deviceAddr, size, hostData.data(), size, ACL_MEMCPY_HOST_TO_DEVICE);
    CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtMemcpy failed. ERROR: %d\n", ret); return ret);
    // Compute the strides of a contiguous tensor
    std::vector<int64_t> strides(shape.size(), 1);
    for (int64_t i = shape.size() - 2; i >= 0; i--) {
        strides[i] = shape[i + 1] * strides[i + 1];
    }
    // Create the aclTensor with aclCreateTensor
    *tensor = aclCreateTensor(shape.data(), shape.size(), dataType, strides.data(), 0,
                              aclFormat::ACL_FORMAT_ND, shape.data(), shape.size(), *deviceAddr);
    return 0;
}
```
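Distilled from the main function that follows, the core two-stage call pattern can be wrapped as shown below. `RunFusedFloydAttentionGrad` is a hypothetical helper added here for illustration only (it is not part of the aclnn API) and assumes the same headers as this example:

```cpp
// Hypothetical wrapper (illustrative only): the two-stage pattern that the
// full example below implements, with tensor creation and logging elided.
int RunFusedFloydAttentionGrad(const aclTensor* q, const aclTensor* k1, const aclTensor* v1,
                               const aclTensor* k2, const aclTensor* v2, const aclTensor* dy,
                               const aclTensor* mask, const aclTensor* softmaxMax,
                               const aclTensor* softmaxSum, const aclTensor* attentionIn,
                               double scaleValue, aclTensor* dq, aclTensor* dk1, aclTensor* dv1,
                               aclTensor* dk2, aclTensor* dv2, aclrtStream stream) {
    uint64_t workspaceSize = 0;
    aclOpExecutor* executor = nullptr;
    // Stage 1: query the workspace size and obtain the executor.
    int ret = aclnnFusedFloydAttentionGradGetWorkspaceSize(
        q, k1, v1, k2, v2, dy, mask, softmaxMax, softmaxSum, attentionIn,
        scaleValue, dq, dk1, dv1, dk2, dv2, &workspaceSize, &executor);
    if (ret != ACL_SUCCESS) return ret;
    // Allocate the device-side workspace only if the operator needs one.
    void* workspaceAddr = nullptr;
    if (workspaceSize > 0) {
        ret = aclrtMalloc(&workspaceAddr, workspaceSize, ACL_MEM_MALLOC_HUGE_FIRST);
        if (ret != ACL_SUCCESS) return ret;
    }
    // Stage 2: launch the computation, then wait for it to finish.
    ret = aclnnFusedFloydAttentionGrad(workspaceAddr, workspaceSize, executor, stream);
    if (ret == ACL_SUCCESS) ret = aclrtSynchronizeStream(stream);
    if (workspaceAddr != nullptr) aclrtFree(workspaceAddr);
    return ret;
}
```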
The complete main function wires these helpers together:

```cpp
int main() {
    // 1. Fixed boilerplate: device/stream initialization (see the AscendCL API reference).
    //    Set deviceId according to the actual device in use.
    int32_t deviceId = 0;
    aclrtStream stream;
    auto ret = Init(deviceId, &stream);
    CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("Init acl failed. ERROR: %d\n", ret); return ret);

    // 2. Construct the inputs and outputs; customize according to the API signature.
    int64_t B = 1;
    int64_t N = 1;
    int64_t S1 = 256;
    int64_t S2 = 256;
    int64_t S3 = 256;
    int64_t D = 128;
    int64_t q_size = B * N * S1 * S2 * D;
    int64_t k1_v1_size = B * N * S1 * S3 * D;
    int64_t k2_v2_size = B * N * S3 * S2 * D;
    int64_t atten_mask_size = B * S1 * S3;
    int64_t softmax_size = B * N * S1 * S2 * 8;
    std::vector<int64_t> qShape = {B, N, S1, S2, D};
    std::vector<int64_t> k1v1Shape = {B, N, S1, S3, D};
    std::vector<int64_t> k2v2Shape = {B, N, S3, S2, D};
    std::vector<int64_t> attenmaskShape = {B, 1, S1, 1, S3};
    std::vector<int64_t> softmaxMaxShape = {B, N, S1, S2, 8};
    std::vector<int64_t> softmaxSumShape = {B, N, S1, S2, 8};
    std::vector<int64_t> attentionInShape = {B, N, S1, S2, D};
    std::vector<int64_t> dqShape = {B, N, S1, S2, D};
    std::vector<int64_t> dk1dv1Shape = {B, N, S1, S3, D};
    std::vector<int64_t> dk2dv2Shape = {B, N, S3, S2, D};

    void* qDeviceAddr = nullptr;
    void* k1DeviceAddr = nullptr;
    void* v1DeviceAddr = nullptr;
    void* k2DeviceAddr = nullptr;
    void* v2DeviceAddr = nullptr;
    void* dxDeviceAddr = nullptr;
    void* attenmaskDeviceAddr = nullptr;
    void* softmaxMaxDeviceAddr = nullptr;
    void* softmaxSumDeviceAddr = nullptr;
    void* attentionInDeviceAddr = nullptr;
    void* dqDeviceAddr = nullptr;
    void* dk1DeviceAddr = nullptr;
    void* dv1DeviceAddr = nullptr;
    void* dk2DeviceAddr = nullptr;
    void* dv2DeviceAddr = nullptr;

    aclTensor* q = nullptr;
    aclTensor* k1 = nullptr;
    aclTensor* v1 = nullptr;
    aclTensor* k2 = nullptr;
    aclTensor* v2 = nullptr;
    aclTensor* dx = nullptr;
    aclTensor* attenmask = nullptr;
    aclTensor* softmaxMax = nullptr;
    aclTensor* softmaxSum = nullptr;
    aclTensor* attentionIn = nullptr;
    aclTensor* dq = nullptr;
    aclTensor* dk1 = nullptr;
    aclTensor* dv1 = nullptr;
    aclTensor* dk2 = nullptr;
    aclTensor* dv2 = nullptr;

    std::vector<float> qHostData(q_size, 1.0);
    std::vector<float> k1HostData(k1_v1_size, 1.0);
    std::vector<float> v1HostData(k1_v1_size, 1.0);
    std::vector<float> k2HostData(k2_v2_size, 1.0);
    std::vector<float> v2HostData(k2_v2_size, 1.0);
    std::vector<float> dxHostData(q_size, 1.0);
    std::vector<uint8_t> attenmaskHostData(atten_mask_size, 0);
    std::vector<float> softmaxMaxHostData(softmax_size, 3.0);
    std::vector<float> softmaxSumHostData(softmax_size, 3.0);
    std::vector<float> attentionInHostData(q_size, 1.0);
    std::vector<float> dqHostData(q_size, 0);
    std::vector<float> dk1HostData(k1_v1_size, 0);
    std::vector<float> dv1HostData(k1_v1_size, 0);
    std::vector<float> dk2HostData(k2_v2_size, 0);
    std::vector<float> dv2HostData(k2_v2_size, 0);

    ret = CreateAclTensor(qHostData, qShape, &qDeviceAddr, aclDataType::ACL_FLOAT16, &q);
    CHECK_RET(ret == ACL_SUCCESS, return ret);
    ret = CreateAclTensor(k1HostData, k1v1Shape, &k1DeviceAddr, aclDataType::ACL_FLOAT16, &k1);
    CHECK_RET(ret == ACL_SUCCESS, return ret);
    ret = CreateAclTensor(v1HostData, k1v1Shape, &v1DeviceAddr, aclDataType::ACL_FLOAT16, &v1);
    CHECK_RET(ret == ACL_SUCCESS, return ret);
    ret = CreateAclTensor(k2HostData, k2v2Shape, &k2DeviceAddr, aclDataType::ACL_FLOAT16, &k2);
    CHECK_RET(ret == ACL_SUCCESS, return ret);
    ret = CreateAclTensor(v2HostData, k2v2Shape, &v2DeviceAddr, aclDataType::ACL_FLOAT16, &v2);
    CHECK_RET(ret == ACL_SUCCESS, return ret);
    ret = CreateAclTensor(dxHostData, qShape, &dxDeviceAddr, aclDataType::ACL_FLOAT16, &dx);
    CHECK_RET(ret == ACL_SUCCESS, return ret);
    ret = CreateAclTensor(attenmaskHostData, attenmaskShape, &attenmaskDeviceAddr,
                          aclDataType::ACL_UINT8, &attenmask);
    CHECK_RET(ret == ACL_SUCCESS, return ret);
    ret = CreateAclTensor(softmaxMaxHostData, softmaxMaxShape, &softmaxMaxDeviceAddr,
                          aclDataType::ACL_FLOAT, &softmaxMax);
    CHECK_RET(ret == ACL_SUCCESS, return ret);
    ret = CreateAclTensor(softmaxSumHostData, softmaxSumShape, &softmaxSumDeviceAddr,
                          aclDataType::ACL_FLOAT, &softmaxSum);
    CHECK_RET(ret == ACL_SUCCESS, return ret);
    ret = CreateAclTensor(attentionInHostData, attentionInShape, &attentionInDeviceAddr,
                          aclDataType::ACL_FLOAT16, &attentionIn);
    CHECK_RET(ret == ACL_SUCCESS, return ret);
    ret = CreateAclTensor(dqHostData, dqShape, &dqDeviceAddr, aclDataType::ACL_FLOAT16, &dq);
    CHECK_RET(ret == ACL_SUCCESS, return ret);
    ret = CreateAclTensor(dk1HostData, dk1dv1Shape, &dk1DeviceAddr, aclDataType::ACL_FLOAT16, &dk1);
    CHECK_RET(ret == ACL_SUCCESS, return ret);
    ret = CreateAclTensor(dv1HostData, dk1dv1Shape, &dv1DeviceAddr, aclDataType::ACL_FLOAT16, &dv1);
    CHECK_RET(ret == ACL_SUCCESS, return ret);
    ret = CreateAclTensor(dk2HostData, dk2dv2Shape, &dk2DeviceAddr, aclDataType::ACL_FLOAT16, &dk2);
    CHECK_RET(ret == ACL_SUCCESS, return ret);
    ret = CreateAclTensor(dv2HostData, dk2dv2Shape, &dv2DeviceAddr, aclDataType::ACL_FLOAT16, &dv2);
    CHECK_RET(ret == ACL_SUCCESS, return ret);

    double scaleValue = 1.0 / sqrt(128);

    // 3. Call the CANN operator API (replace with the specific API name as needed).
    uint64_t workspaceSize = 0;
    aclOpExecutor* executor;
    // First-stage interface of aclnnFusedFloydAttentionGrad
    ret = aclnnFusedFloydAttentionGradGetWorkspaceSize(q, k1, v1, k2, v2, dx, attenmask,
                                                       softmaxMax, softmaxSum, attentionIn,
                                                       scaleValue, dq, dk1, dv1, dk2, dv2,
                                                       &workspaceSize, &executor);
    CHECK_RET(ret == ACL_SUCCESS,
              LOG_PRINT("aclnnFusedFloydAttentionGradGetWorkspaceSize failed. ERROR: %d\n", ret);
              return ret);
    // Allocate device memory based on the workspaceSize computed by the first-stage call
    void* workspaceAddr = nullptr;
    if (workspaceSize > 0) {
        ret = aclrtMalloc(&workspaceAddr, workspaceSize, ACL_MEM_MALLOC_HUGE_FIRST);
        CHECK_RET(ret == ACL_SUCCESS,
                  LOG_PRINT("allocate workspace failed. ERROR: %d\n", ret); return ret);
    }
    // Second-stage interface of aclnnFusedFloydAttentionGrad
    ret = aclnnFusedFloydAttentionGrad(workspaceAddr, workspaceSize, executor, stream);
    CHECK_RET(ret == ACL_SUCCESS,
              LOG_PRINT("aclnnFusedFloydAttentionGrad failed. ERROR: %d\n", ret); return ret);

    // 4. Fixed boilerplate: synchronize and wait for the task to finish
    ret = aclrtSynchronizeStream(stream);
    CHECK_RET(ret == ACL_SUCCESS,
              LOG_PRINT("aclrtSynchronizeStream failed. ERROR: %d\n", ret); return ret);

    // 5. Fetch the output: copy the result from device memory back to the host
    //    (adjust according to the specific API definition)
    PrintOutResult(dqShape, &dqDeviceAddr);
    // PrintOutResult(dk1dv1Shape, &dk1DeviceAddr);
    // PrintOutResult(dk1dv1Shape, &dv1DeviceAddr);

    // 6. Destroy the aclTensor objects (adjust according to the specific API definition)
    aclDestroyTensor(q);
    aclDestroyTensor(k1);
    aclDestroyTensor(v1);
    aclDestroyTensor(k2);
    aclDestroyTensor(v2);
    aclDestroyTensor(dx);
    aclDestroyTensor(attenmask);
    aclDestroyTensor(softmaxMax);
    aclDestroyTensor(softmaxSum);
    aclDestroyTensor(attentionIn);
    aclDestroyTensor(dq);
    aclDestroyTensor(dk1);
    aclDestroyTensor(dv1);
    aclDestroyTensor(dk2);
    aclDestroyTensor(dv2);

    // 7. Release device resources
    aclrtFree(qDeviceAddr);
    aclrtFree(k1DeviceAddr);
    aclrtFree(v1DeviceAddr);
    aclrtFree(k2DeviceAddr);
    aclrtFree(v2DeviceAddr);
    aclrtFree(dxDeviceAddr);
    aclrtFree(attenmaskDeviceAddr);
    aclrtFree(softmaxMaxDeviceAddr);
    aclrtFree(softmaxSumDeviceAddr);
    aclrtFree(attentionInDeviceAddr);
    aclrtFree(dqDeviceAddr);
    aclrtFree(dk1DeviceAddr);
    aclrtFree(dv1DeviceAddr);
    aclrtFree(dk2DeviceAddr);
    aclrtFree(dv2DeviceAddr);
    if (workspaceSize > 0) {
        aclrtFree(workspaceAddr);
    }
    aclrtDestroyStream(stream);
    aclrtResetDevice(deviceId);
    aclFinalize();
    return 0;
}
```