# CANN Quantized FlashAttention Operator: aclnnQuantFlashAttentionScore
Project: [ops-transformer](https://gitcode.com/cann/ops-transformer), the CANN transformer operator library for accelerating large-model networks on NPUs.

## Product Support

| Product | Supported |
| --- | --- |
| Ascend 950PR / Ascend 950DT | √ |
| Atlas A3 training series / Atlas A3 inference series | × |
| Atlas A2 training series / Atlas A2 inference series | × |
| Atlas 200I/500 A2 inference products | × |
| Atlas inference series | × |
| Atlas training series | × |

## Function Description

**Interface function**: In quantized training scenarios, computes self-attention using the FlashAttention algorithm.

**Computation formula**: The forward attention computation is:

$$ p = pScale \cdot \mathrm{Softmax}\big(scale \cdot (query \cdot key^{T} \cdot (dSq \cdot dSk))\big) $$

$$ attention\_out = p \cdot value \cdot dSv \cdot dSp $$

where

$$ dSp = 1 / pScale $$

Here dSq, dSk, and dSv are the quantization scales of query, key, and value, and dSp = 1/pScale cancels the pScale factor applied to p.

## Function Prototype

Each operator uses a two-stage interface: first call `aclnnQuantFlashAttentionScoreGetWorkspaceSize` to obtain the required workspace size and an executor that encapsulates the computation flow, then call `aclnnQuantFlashAttentionScore` to execute the computation.

```cpp
aclnnStatus aclnnQuantFlashAttentionScoreGetWorkspaceSize(
    const aclTensor *query, const aclTensor *key, const aclTensor *value,
    const aclTensor *dScaleQ, const aclTensor *dScaleK, const aclTensor *dScaleV,
    const aclTensor *attenMaskOptional, const aclTensor *pScale,
    double scaleValue, int64_t preTokens, int64_t nextTokens, int64_t headNum,
    char *inputLayout, int64_t sparseMode,
    aclTensor *softmaxMaxOut, aclTensor *softmaxSumOut, aclTensor *softmaxOutOut,
    aclTensor *attentionOutOut, uint64_t *workspaceSize, aclOpExecutor **executor)
```

```cpp
aclnnStatus aclnnQuantFlashAttentionScore(
    void *workspace, uint64_t workspaceSize, aclOpExecutor *executor,
    const aclrtStream stream)
```

## aclnnQuantFlashAttentionScoreGetWorkspaceSize Parameter Description

| Name | In/Out | Description | Data Type | Data Format | Dims (shape) | Non-contiguous Tensor |
| --- | --- | --- | --- | --- | --- | --- |
| query | Input | `query` in the formula. Data type must match key/value. | HIFLOAT8 | ND | 4 | √ |
| key | Input | `key` in the formula. Data type must match query/value. | HIFLOAT8 | ND | 4 | √ |
| value | Input | `value` in the formula. Data type must match query/key. | HIFLOAT8 | ND | 4 | √ |
| attenMaskOptional | Input | Reserved parameter; not used yet. | - | - | - | - |
| dScaleQ | Input | Quantization scale of query. Shape [B, N1, Ceil(Sq/blocksize), 1]; blocksize currently supports 128. | FLOAT32 | ND | 4 | √ |
| dScaleK | Input | Quantization scale of key. Shape [B, N2, Ceil(Skv/blocksize), 1]; blocksize currently supports 256. | FLOAT32 | ND | 4 | √ |
| dScaleV | Input | Quantization scale of value. Shape [B, N2, Ceil(Skv/blocksize), 1]; blocksize currently supports 512. | FLOAT32 | ND | 4 | √ |
| pScale | Input | Quantization scale of p. Shape [1]. | FLOAT32 | - | - | - |
| scaleValue | Input | `scale` in the formula, the scaling factor. | DOUBLE | - | - | - |
| preTokens | Input | Reserved parameter; not used yet. | - | - | - | - |
| nextTokens | Input | Reserved parameter; not used yet. | - | - | - | - |
| headNum | Input | Number of heads on a single device, i.e. the length of query's N axis. | INT64 | - | - | - |
| inputLayout | Input | Data layout of the query, key, and value inputs. Supports BSND. | String | - | - | - |
| sparseMode | Input | Reserved parameter; not used yet. | - | - | - | - |
| softmaxMaxOut | Output | Max intermediate result of Softmax, used for the backward pass. Shape [B, N, Sq, 1]. | FLOAT | ND | 4 | √ |
| softmaxSumOut | Output | Sum intermediate result of Softmax, used for the backward pass. Shape [B, N, Sq, 1]. | FLOAT | ND | 4 | √ |
| attentionOutOut | Output | Final output of the computation formula. Shape matches query. | BFLOAT16 | ND | 4 | √ |
| workspaceSize | Output | Size of the workspace to allocate on the device. | - | - | - | - |
| executor | Output | Op executor encapsulating the computation flow. | - | - | - | - |

**Return value**: an `aclnnStatus` status code; see the aclnn return codes. The first-stage interface validates its inputs and reports errors in the following cases:

| Return code | Error code | Description |
| --- | --- | --- |
| ACLNN_ERR_PARAM_NULLPTR | 161001 | A required input, output, or attribute is a null pointer. |
| ACLNN_ERR_PARAM_INVALID | 161002 | The data type of query, key, value, softmaxMaxOut, softmaxSumOut, or attentionOutOut is outside the supported range. |

## aclnnQuantFlashAttentionScore Parameter Description

| Name | In/Out | Description |
| --- | --- | --- |
| workspace | Input | Address of the workspace memory allocated on the device. |
| workspaceSize | Input | Size of the device-side workspace, obtained from the first-stage interface `aclnnQuantFlashAttentionScoreGetWorkspaceSize`. |
| executor | Input | Op executor encapsulating the computation flow. |
| stream | Input | Stream on which the task executes. |

**Return value**: an `aclnnStatus` status code; see the aclnn return codes.

## Constraints

- Deterministic computation: `aclnnQuantFlashAttentionScore` is deterministic by default.
- When this interface is used together with PyTorch, the versions of the CANN packages must match those of the PyTorch packages.
- B (batch size) of the query, key, and value inputs must be equal.
- D (Head-Dim) must satisfy qD == kD and kD == vD, i.e. query, key, and value share the same head dimension.
- inputLayout must be consistent across the inputs.
- Shape constraints: the following cases are currently supported (the blockwise-scale shapes they imply are worked out in the sketch after this list):

| Layout | Query Shape | Key Shape | Value Shape |
| --- | --- | --- | --- |
| BSND | [1, 57600, 5, 128] | [1, 57600, 5, 128] | [1, 57600, 5, 128] |
| BSND | [1, 7200, 40, 128] | [1, 512, 40, 128] | [1, 512, 40, 128] |

- The query/key/value layout can be read along several dimensions: B (Batch) is the input batch size, S (Seq-Length) the input sequence length, N (Head-Num) the number of heads, and D (Head-Dim) the smallest hidden-layer unit size.
- In some scenarios, an excessively large computation may cause the operator to time out (an aicore error whose errorStr is "timeout or trap error"); in that case, splitting along an axis is recommended. Note that the computation load grows with B, S, N, D, and similar parameters: the larger the values, the heavier the computation.
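As referenced in the list above, here is a minimal sketch of the blockwise-scale shape arithmetic from the parameter table, using the dimensions of the second supported case; the `ceilDiv` helper and the printed labels are illustrative, not part of the CANN API:

```cpp
#include <cstdint>
#include <cstdio>

// Ceiling division, as in Ceil(S / blocksize) from the dScale shape rules.
static int64_t ceilDiv(int64_t a, int64_t b) { return (a + b - 1) / b; }

int main()
{
    // Second supported case: query [1, 7200, 40, 128], key/value [1, 512, 40, 128] (BSND).
    int64_t B = 1, N1 = 40, N2 = 40, Sq = 7200, Skv = 512;
    // blocksize is 128 for dScaleQ, 256 for dScaleK, and 512 for dScaleV.
    printf("dScaleQ: [%ld, %ld, %ld, 1]\n", B, N1, ceilDiv(Sq, 128));  // [1, 40, 57, 1]
    printf("dScaleK: [%ld, %ld, %ld, 1]\n", B, N2, ceilDiv(Skv, 256)); // [1, 40, 2, 1]
    printf("dScaleV: [%ld, %ld, %ld, 1]\n", B, N2, ceilDiv(Skv, 512)); // [1, 40, 1, 1]
    return 0;
}
```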
## Calling Example

The sample code below is for reference only; for the specific build and run procedure, see "Compiling and Running Samples".

```cpp
#include <iostream>
#include <vector>
#include <cmath>
#include "acl/acl.h"
#include "aclnnop/aclnn_flash_attention_score.h"

#define CHECK_RET(cond, return_expr) \
    do {                             \
        if (!(cond)) {               \
            return_expr;             \
        }                            \
    } while (0)

#define LOG_PRINT(message, ...)         \
    do {                                \
        printf(message, ##__VA_ARGS__); \
    } while (0)

int64_t GetShapeSize(const std::vector<int64_t>& shape)
{
    int64_t shapeSize = 1;
    for (auto i : shape) {
        shapeSize *= i;
    }
    return shapeSize;
}

void PrintOutResult(std::vector<int64_t>& shape, void** deviceAddr)
{
    auto size = GetShapeSize(shape);
    std::vector<float> resultData(size, 0);
    auto ret = aclrtMemcpy(resultData.data(), resultData.size() * sizeof(resultData[0]),
                           *deviceAddr, size * sizeof(resultData[0]), ACL_MEMCPY_DEVICE_TO_HOST);
    CHECK_RET(ret == ACL_SUCCESS,
              LOG_PRINT("copy result from device to host failed. ERROR: %d\n", ret); return);
    for (int64_t i = 0; i < size; i++) {
        LOG_PRINT("mean result[%ld] is: %f\n", i, resultData[i]);
    }
}

int Init(int32_t deviceId, aclrtContext* context, aclrtStream* stream)
{
    // Fixed pattern: AscendCL initialization
    auto ret = aclInit(nullptr);
    CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclInit failed. ERROR: %d\n", ret); return ret);
    ret = aclrtSetDevice(deviceId);
    CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtSetDevice failed. ERROR: %d\n", ret); return ret);
    ret = aclrtCreateContext(context, deviceId);
    CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtCreateContext failed. ERROR: %d\n", ret); return ret);
    ret = aclrtSetCurrentContext(*context);
    CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtSetCurrentContext failed. ERROR: %d\n", ret); return ret);
    ret = aclrtCreateStream(stream);
    CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtCreateStream failed. ERROR: %d\n", ret); return ret);
    return 0;
}

template <typename T>
int CreateAclTensor(const std::vector<T>& hostData, const std::vector<int64_t>& shape, void** deviceAddr,
                    aclDataType dataType, aclTensor** tensor)
{
    auto size = GetShapeSize(shape) * sizeof(T);
    // Allocate device-side memory with aclrtMalloc
    auto ret = aclrtMalloc(deviceAddr, size, ACL_MEM_MALLOC_HUGE_FIRST);
    CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtMalloc failed. ERROR: %d\n", ret); return ret);
    // Copy host-side data to the device-side memory with aclrtMemcpy
    ret = aclrtMemcpy(*deviceAddr, size, hostData.data(), size, ACL_MEMCPY_HOST_TO_DEVICE);
    CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtMemcpy failed. ERROR: %d\n", ret); return ret);
    // Compute the strides of a contiguous tensor
    std::vector<int64_t> strides(shape.size(), 1);
    for (int64_t i = shape.size() - 2; i >= 0; i--) {
        strides[i] = shape[i + 1] * strides[i + 1];
    }
    // Create the aclTensor with aclCreateTensor
    *tensor = aclCreateTensor(shape.data(), shape.size(), dataType, strides.data(), 0,
                              aclFormat::ACL_FORMAT_ND, shape.data(), shape.size(), *deviceAddr);
    return 0;
}
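// The main flow below follows the numbered steps of the two-stage interface:
// (1) initialize device/context/stream, (2) build the input and output tensors,
// (3) query the workspace size, obtain the executor, and launch the kernel,
// (4) synchronize the stream, (5) copy results back to the host, and
// (6)-(7) release the tensors and device resources.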
int main()
{
    // 1. Fixed pattern: device/context/stream initialization; see the AscendCL API list.
    // Set deviceId according to the actual device in use.
    int32_t deviceId = 0;
    aclrtContext context;
    aclrtStream stream;
    auto ret = Init(deviceId, &context, &stream);
    CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("Init acl failed. ERROR: %d\n", ret); return ret);

    // 2. Construct the inputs and outputs; customize according to the API definition.
    int64_t B = 1;
    int64_t N1 = 40;
    int64_t N2 = 40;
    int64_t S1 = 7200;
    int64_t S2 = 512;
    int64_t D = 128;
    int64_t q_size = B * S1 * N1 * D;
    int64_t kv_size = B * S2 * N2 * D;
    int64_t softmax_size = B * N1 * S1;
    int64_t d_scale_q_size = B * N1 * ((S1 + 127) / 128);
    int64_t d_scale_k_size = B * N2 * (S2 / 256);
    int64_t d_scale_v_size = B * N2 * (S2 / 512);
    std::vector<int64_t> qShape = {B, S1, N1, D};
    std::vector<int64_t> kShape = {B, S2, N2, D};
    std::vector<int64_t> vShape = {B, S2, N2, D};
    std::vector<int64_t> dScaleQShape = {B, N1, (S1 + 127) / 128, 1};
    std::vector<int64_t> dScaleKShape = {B, N2, S2 / 256, 1};
    std::vector<int64_t> dScaleVShape = {B, N2, S2 / 512, 1};
    std::vector<int64_t> pScaleShape = {1};
    std::vector<int64_t> attentionOutShape = {B, S1, N1, D};
    std::vector<int64_t> softmaxMaxShape = {B, N1, S1, 1};
    std::vector<int64_t> softmaxSumShape = {B, N1, S1, 1};
    void* qDeviceAddr = nullptr;
    void* kDeviceAddr = nullptr;
    void* vDeviceAddr = nullptr;
    void* dScaleQDeviceAddr = nullptr;
    void* dScaleKDeviceAddr = nullptr;
    void* dScaleVDeviceAddr = nullptr;
    void* pScaleDeviceAddr = nullptr;
    void* attentionOutDeviceAddr = nullptr;
    void* softmaxMaxDeviceAddr = nullptr;
    void* softmaxSumDeviceAddr = nullptr;
    aclTensor* q = nullptr;
    aclTensor* k = nullptr;
    aclTensor* v = nullptr;
    aclTensor* attenmask = nullptr;
    aclTensor* dScaleQ = nullptr;
    aclTensor* dScaleK = nullptr;
    aclTensor* dScaleV = nullptr;
    aclTensor* pScale = nullptr;
    aclTensor* attentionOut = nullptr;
    aclTensor* softmaxMax = nullptr;
    aclTensor* softmaxSum = nullptr;
    aclTensor* softmaxOut = nullptr;
    std::vector<uint8_t> qHostData(q_size, 1);
    std::vector<uint8_t> kHostData(kv_size, 1);
    std::vector<uint8_t> vHostData(kv_size, 1);
    std::vector<float> dScaleQHostData(d_scale_q_size, 1.0);
    std::vector<float> dScaleKHostData(d_scale_k_size, 1.0);
    std::vector<float> dScaleVHostData(d_scale_v_size, 1.0);
    std::vector<float> pScaleHostData(1, 1.0);
    std::vector<float> attentionOutHostData(q_size, 255);
    std::vector<float> softmaxMaxHostData(softmax_size, 3.0);
    std::vector<float> softmaxSumHostData(softmax_size, 3.0);
    ret = CreateAclTensor(qHostData, qShape, &qDeviceAddr, aclDataType::ACL_HIFLOAT8, &q);
    CHECK_RET(ret == ACL_SUCCESS, return ret);
    ret = CreateAclTensor(kHostData, kShape, &kDeviceAddr, aclDataType::ACL_HIFLOAT8, &k);
    CHECK_RET(ret == ACL_SUCCESS, return ret);
    ret = CreateAclTensor(vHostData, vShape, &vDeviceAddr, aclDataType::ACL_HIFLOAT8, &v);
    CHECK_RET(ret == ACL_SUCCESS, return ret);
    ret = CreateAclTensor(dScaleQHostData, dScaleQShape, &dScaleQDeviceAddr, aclDataType::ACL_FLOAT, &dScaleQ);
    CHECK_RET(ret == ACL_SUCCESS, return ret);
    ret = CreateAclTensor(dScaleKHostData, dScaleKShape, &dScaleKDeviceAddr, aclDataType::ACL_FLOAT, &dScaleK);
    CHECK_RET(ret == ACL_SUCCESS, return ret);
    ret = CreateAclTensor(dScaleVHostData, dScaleVShape, &dScaleVDeviceAddr, aclDataType::ACL_FLOAT, &dScaleV);
    CHECK_RET(ret == ACL_SUCCESS, return ret);
    ret = CreateAclTensor(pScaleHostData, pScaleShape, &pScaleDeviceAddr, aclDataType::ACL_FLOAT, &pScale);
    CHECK_RET(ret == ACL_SUCCESS, return ret);
    ret = CreateAclTensor(attentionOutHostData, attentionOutShape, &attentionOutDeviceAddr, aclDataType::ACL_BF16, &attentionOut);
    CHECK_RET(ret == ACL_SUCCESS, return ret);
    ret = CreateAclTensor(softmaxMaxHostData, softmaxMaxShape, &softmaxMaxDeviceAddr, aclDataType::ACL_FLOAT, &softmaxMax);
    CHECK_RET(ret == ACL_SUCCESS, return ret);
    ret = CreateAclTensor(softmaxSumHostData, softmaxSumShape, &softmaxSumDeviceAddr, aclDataType::ACL_FLOAT, &softmaxSum);
    CHECK_RET(ret == ACL_SUCCESS, return ret);
    double scaleValue = 0.088388;
    int64_t preTokens = 65536;
    int64_t nextTokens = 65536;
    int64_t headNum = 40;
    int64_t sparseMode = 0;
    char layOut[5] = {'B', 'S', 'N', 'D', 0};
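    // scaleValue 0.088388 matches 1 / sqrt(D) for D = 128 (1/sqrt(128) ≈ 0.088388),
    // the usual attention scaling factor. preTokens, nextTokens, and sparseMode are
    // reserved parameters of this interface and are not used yet, so the values set
    // above are placeholders; attenmask is likewise the reserved attenMaskOptional input.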
    // 3. Call the CANN operator API (replace with the specific API name as needed).
    uint64_t workspaceSize = 0;
    aclOpExecutor* executor = nullptr;
    // First-stage interface of aclnnQuantFlashAttentionScore
    ret = aclnnQuantFlashAttentionScoreGetWorkspaceSize(
        q, k, v, attenmask, dScaleQ, dScaleK, dScaleV, pScale, scaleValue, preTokens, nextTokens,
        headNum, layOut, sparseMode, softmaxMax, softmaxSum, softmaxOut, attentionOut,
        &workspaceSize, &executor);
    CHECK_RET(ret == ACL_SUCCESS,
              LOG_PRINT("aclnnQuantFlashAttentionScoreGetWorkspaceSize failed. ERROR: %d\n", ret);
              return ret);
    // Allocate device memory based on the workspaceSize computed by the first-stage interface
    void* workspaceAddr = nullptr;
    if (workspaceSize > 0) {
        ret = aclrtMalloc(&workspaceAddr, workspaceSize, ACL_MEM_MALLOC_HUGE_FIRST);
        CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("allocate workspace failed. ERROR: %d\n", ret); return ret);
    }
    // Second-stage interface of aclnnQuantFlashAttentionScore
    ret = aclnnQuantFlashAttentionScore(workspaceAddr, workspaceSize, executor, stream);
    CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclnnQuantFlashAttentionScore failed. ERROR: %d\n", ret); return ret);

    // 4. Fixed pattern: synchronize and wait for the task to finish
    ret = aclrtSynchronizeStream(stream);
    CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtSynchronizeStream failed. ERROR: %d\n", ret); return ret);

    // 5. Fetch the outputs, copying the results from device memory to the host (adjust per the API definition)
    PrintOutResult(attentionOutShape, &attentionOutDeviceAddr);
    PrintOutResult(softmaxMaxShape, &softmaxMaxDeviceAddr);
    PrintOutResult(softmaxSumShape, &softmaxSumDeviceAddr);

    // 6. Destroy the aclTensor objects (adjust per the API definition)
    aclDestroyTensor(q);
    aclDestroyTensor(k);
    aclDestroyTensor(v);
    aclDestroyTensor(attenmask);
    aclDestroyTensor(dScaleQ);
    aclDestroyTensor(dScaleK);
    aclDestroyTensor(dScaleV);
    aclDestroyTensor(pScale);
    aclDestroyTensor(attentionOut);
    aclDestroyTensor(softmaxMax);
    aclDestroyTensor(softmaxSum);

    // 7. Free device resources
    aclrtFree(qDeviceAddr);
    aclrtFree(kDeviceAddr);
    aclrtFree(vDeviceAddr);
    aclrtFree(dScaleQDeviceAddr);
    aclrtFree(dScaleKDeviceAddr);
    aclrtFree(dScaleVDeviceAddr);
    aclrtFree(pScaleDeviceAddr);
    aclrtFree(attentionOutDeviceAddr);
    aclrtFree(softmaxMaxDeviceAddr);
    aclrtFree(softmaxSumDeviceAddr);
    if (workspaceSize > 0) {
        aclrtFree(workspaceAddr);
    }
    aclrtDestroyStream(stream);
    aclrtDestroyContext(context);
    aclrtResetDevice(deviceId);
    aclFinalize();
    return 0;
}
```
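Note the ordering that the two-stage pattern imposes: the caller allocates the workspace itself between the two calls, no computation happens until the second-stage call is issued on the stream, and `aclrtSynchronizeStream` must complete before the device-side results are copied back to the host.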