mirror of https://github.com/PaddlePaddle/FastDeploy.git

* support machete weight only gemm * add generate * update * fix * change file location * add sm_version limit * fix * fix * fix ci * fix coverage * fix xpu
85 lines
3.5 KiB
Plaintext
// Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "machete_mm_launcher.cuh"
#include "machete_prepack_launcher.cuh"
// Convert a paddle::optional<T> into the std::optional<T> expected by
// machete's dispatch interface.
template <typename T>
std::optional<T> ConvertToStdOptional(const paddle::optional<T>& paddle_opt) {
  return paddle_opt ? std::optional<T>(paddle_opt.get()) : std::nullopt;
}
// Thin wrapper around machete::mm_dispatch that packs the arguments into the
// dispatcher's argument struct.
paddle::Tensor mm(paddle::Tensor const& A, paddle::Tensor const& B,
                  int64_t b_type_id,
                  std::optional<paddle::DataType> const& maybe_out_type,
                  std::optional<paddle::Tensor> const& maybe_group_scales,
                  std::optional<paddle::Tensor> const& maybe_group_zeros,
                  int64_t maybe_group_size,
                  std::optional<paddle::Tensor> const& maybe_channel_scales,
                  std::optional<paddle::Tensor> const& maybe_token_scales,
                  std::string maybe_schedule) {
  machete::ScalarType const b_type = machete::ScalarType::from_id(b_type_id);

  // An empty schedule string means "let machete pick a schedule heuristically";
  // otherwise forward the requested schedule.
  std::optional<std::string> maybe_schedule_opt;
  if (!maybe_schedule.empty()) {
    maybe_schedule_opt = maybe_schedule;
  }

  // Forward the group size; -1 is treated as "not set" (assumed sentinel).
  std::optional<int64_t> maybe_group_size_opt;
  if (maybe_group_size != -1) {
    maybe_group_size_opt = maybe_group_size;
  }

  return machete::mm_dispatch({.A = A,
                               .B = B,
                               .b_type = b_type,
                               .maybe_out_type = maybe_out_type,
                               .maybe_group_scales = maybe_group_scales,
                               .maybe_group_zeros = maybe_group_zeros,
                               .maybe_group_size = maybe_group_size_opt,
                               .maybe_channel_scales = maybe_channel_scales,
                               .maybe_token_scales = maybe_token_scales,
                               .maybe_schedule = maybe_schedule_opt});
}
// Paddle custom-op entry point: maps the string/paddle-typed attributes onto
// the std-typed arguments expected by mm() above.
std::vector<paddle::Tensor> MacheteMMKernel(
    paddle::Tensor const& A, paddle::Tensor const& B,
    paddle::optional<paddle::Tensor> const& maybe_group_scales,
    paddle::optional<paddle::Tensor> const& maybe_group_zeros,
    paddle::optional<paddle::Tensor> const& maybe_channel_scales,
    paddle::optional<paddle::Tensor> const& maybe_token_scales,
    std::string const& b_type_str,
    std::string const& maybe_out_type_str,
    int64_t const& maybe_group_size,
    std::string const& maybe_schedule) {
  machete::ScalarTypeId b_type_id;
  paddle::DataType maybe_out_type;

  // Only 4-bit weights with a bias of 8 (uint4b8) are supported so far.
  if (b_type_str == "uint4b8") {
    b_type_id = machete::kU4B8.id();
  } else {
    PADDLE_ENFORCE(false, "b_type_str not supported!");
  }

  // Resolve the requested output dtype; fall back to the dtype of A.
  if (maybe_out_type_str == "float16") {
    maybe_out_type = paddle::DataType::FLOAT16;
  } else if (maybe_out_type_str == "bfloat16") {
    maybe_out_type = paddle::DataType::BFLOAT16;
  } else {
    maybe_out_type = A.dtype();
  }

  auto out = mm(A, B, b_type_id, maybe_out_type,
                ConvertToStdOptional<paddle::Tensor>(maybe_group_scales),
                ConvertToStdOptional<paddle::Tensor>(maybe_group_zeros),
                maybe_group_size,
                ConvertToStdOptional<paddle::Tensor>(maybe_channel_scales),
                ConvertToStdOptional<paddle::Tensor>(maybe_token_scales),
                maybe_schedule);
  return {out};
}
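
For context, here is a minimal sketch of how MacheteMMKernel might be exposed to Python as a Paddle custom operator. The registration is not part of this file; the op name "machete_mm", the input/attribute names, the omission of infer-shape/infer-dtype functions, and the use of PD_BUILD_STATIC_OP are assumptions for illustration, not FastDeploy's actual binding.

// Hypothetical registration sketch (not part of this file): binds
// MacheteMMKernel as a Paddle custom op. Names and attribute list are assumed.
#include "paddle/extension.h"

PD_BUILD_STATIC_OP(machete_mm)
    .Inputs({"A", "B",
             paddle::Optional("maybe_group_scales"),
             paddle::Optional("maybe_group_zeros"),
             paddle::Optional("maybe_channel_scales"),
             paddle::Optional("maybe_token_scales")})
    .Outputs({"out"})
    .Attrs({"b_type_str: std::string",
            "maybe_out_type_str: std::string",
            "maybe_group_size: int64_t",
            "maybe_schedule: std::string"})
    .SetKernelFn(PD_KERNEL(MacheteMMKernel));

The input and attribute order mirrors the parameter order of MacheteMMKernel, which is what Paddle's PD_KERNEL dispatch expects (tensor inputs first, then attributes).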