Mirror of https://github.com/PaddlePaddle/FastDeploy.git (synced 2025-10-05 16:48:03 +08:00)
support qwen2 weight only (#3571)
Some checks failed
CE Compile Job / ce_job_pre_check (push) Has been cancelled
CE Compile Job / print_ce_job_pre_check_outputs (push) Has been cancelled
CE Compile Job / FD-Clone-Linux (push) Has been cancelled
CE Compile Job / Show Code Archive Output (push) Has been cancelled
CE Compile Job / BUILD_SM8090 (push) Has been cancelled
CE Compile Job / BUILD_SM8689 (push) Has been cancelled
CE Compile Job / CE_UPLOAD (push) Has been cancelled
Deploy GitHub Pages / deploy (push) Has been cancelled
Publish Job / publish_pre_check (push) Has been cancelled
Publish Job / print_publish_pre_check_outputs (push) Has been cancelled
Publish Job / FD-Clone-Linux (push) Has been cancelled
Publish Job / Show Code Archive Output (push) Has been cancelled
Publish Job / BUILD_SM8090 (push) Has been cancelled
Publish Job / BUILD_SM8689 (push) Has been cancelled
Publish Job / PADDLE_PYPI_UPLOAD_8090 (push) Has been cancelled
Publish Job / PADDLE_PYPI_UPLOAD_8689 (push) Has been cancelled
Publish Job / Run FastDeploy Unit Tests and Coverage (push) Has been cancelled
Publish Job / Run FastDeploy LogProb Tests (push) Has been cancelled
Publish Job / Extracted partial CE model tasks to run in CI. (push) Has been cancelled
Publish Job / Run Base Tests (push) Has been cancelled
Publish Job / Run Accuracy Tests (push) Has been cancelled
@@ -348,7 +348,6 @@ class ColumnParallelLinear(LinearBase):
         if self.with_bias:
             # col parallel
             _set_var_distributed(self.bias, split_axis=1)
-            if self.nranks > 1:
-                set_weight_attrs(self.bias, {"output_dim": True})
+            set_weight_attrs(self.bias, {"output_dim": True})


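This hunk (and the matching one in RowParallelLinear further down) attaches the `output_dim` attribute to the bias unconditionally instead of only when `self.nranks > 1`, so attribute-driven weight loaders see the same metadata on a single rank as they do under tensor parallelism. A minimal sketch of the pattern, with a hypothetical `load_bias` helper (not FastDeploy code) that branches on the attribute the way the loaders in this file do:

import numpy as np

# Hypothetical sketch: a loader that relies on the "output_dim" attribute
# being present on every rank, not just when nranks > 1.
class Bias:
    pass

def load_bias(param, loaded_weight, rank, nranks):
    output_dim = getattr(param, "output_dim", None)
    assert output_dim is not None, "bias is missing shard metadata"
    shard_dim = -1 if output_dim else 0
    block = loaded_weight.shape[shard_dim] // nranks
    return np.take(loaded_weight, range(rank * block, (rank + 1) * block), axis=shard_dim)

param = Bias()
param.output_dim = True  # now set unconditionally by ColumnParallelLinear
print(load_bias(param, np.arange(8.0), rank=0, nranks=2))  # [0. 1. 2. 3.]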
@@ -404,6 +403,7 @@ class MergedColumnParallelLinear(ColumnParallelLinear):

     def weight_loader(self, param, loaded_weight, loaded_shard_id: Optional[str] = None):
         output_dim = getattr(param, "output_dim", None)
+        assert output_dim is not None
         shard_dim = -1 if output_dim else 0
         output_size = param.shape[shard_dim]
         if loaded_shard_id is None:
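The added `assert` turns a missing `output_dim` attribute into an immediate failure instead of letting `shard_dim = -1 if output_dim else 0` silently treat `None` as False. The flag reads as: True means the output dimension is the last axis of the parameter, False means it is the first, which is how a transposed weight-only quantized layout would be described. A small illustration under those assumptions (shapes are made up):

import numpy as np

def shard(weight, output_dim, rank, nranks):
    # Sketch of the sharding rule assumed above, not the FastDeploy loader.
    shard_dim = -1 if output_dim else 0
    output_size = weight.shape[shard_dim]
    block = output_size // nranks
    return np.take(weight, range(rank * block, (rank + 1) * block), axis=shard_dim)

w = np.zeros((1024, 4096))                   # plain layout: [in, out]
print(shard(w, output_dim=True, rank=0, nranks=2).shape)    # (1024, 2048)
wq = np.zeros((4096, 1024), dtype=np.int8)   # hypothetical transposed wint8 layout
print(shard(wq, output_dim=False, rank=0, nranks=2).shape)  # (2048, 1024)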
@@ -517,11 +517,12 @@ class QKVParallelLinear(ColumnParallelLinear):
             with_bias=with_bias,
             add_bias=add_bias,
         )
-        setattr(self.weight, "output_dim", True)

     def weight_loader(self, param, loaded_weight, loaded_shard_id: Optional[str] = None):
         output_dim = getattr(param, "output_dim", None)
-        head_dim = param.shape[output_dim] // (self.num_heads_per_rank + 2 * self.kv_num_heads_per_rank)
+        assert output_dim is not None
+        dim = -1 if output_dim else 0
+        head_dim = param.shape[dim] // (self.num_heads_per_rank + 2 * self.kv_num_heads_per_rank)
         if loaded_shard_id is None:
             # Loaded weight is already fused on disk
             shard_offsets = [
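Two things change here. First, `output_dim` is a boolean, so the old `param.shape[output_dim]` indexed axis 1 through bool-to-int coercion and could not describe a transposed weight-only layout; mapping the flag through `dim = -1 if output_dim else 0` fixes that, and the now-redundant `dim` assignment inside the `nranks != 1` branch is deleted in the next hunk. Second, the fused QKV parameter packs the query, key, and value blocks along the output axis, which is where the `head_dim` division comes from. A worked example, assuming Qwen2-7B-like sizes (28 query heads, 4 KV heads, head_dim 128 on one rank):

num_heads_per_rank = 28
kv_num_heads_per_rank = 4
head_dim = 128

# The fused QKV output axis is (q_heads + 2 * kv_heads) * head_dim wide,
# so head_dim can be recovered from the parameter shape by division.
fused_out = (num_heads_per_rank + 2 * kv_num_heads_per_rank) * head_dim  # 4608
assert fused_out // (num_heads_per_rank + 2 * kv_num_heads_per_rank) == head_dim

# Block boundaries a fused-on-disk loader would compute along that axis:
q_end = num_heads_per_rank * head_dim                 # 3584
k_end = q_end + kv_num_heads_per_rank * head_dim      # 4096
shard_offsets = [("q", 0, q_end), ("k", q_end, k_end), ("v", k_end, fused_out)]
print(shard_offsets)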
@@ -540,7 +541,6 @@ class QKVParallelLinear(ColumnParallelLinear):
         assert loaded_shard_id in ["q", "k", "v"]
         # Tensor parallelism splits the weight along the output_dim
         if self.nranks != 1:
-            dim = -1 if output_dim else 0
             if isinstance(loaded_weight, np.ndarray):
                 size = loaded_weight.shape[dim]
             else:
@@ -717,7 +717,6 @@ class RowParallelLinear(LinearBase):
         if self.with_bias:
             # col parallel
             _set_var_distributed(self.bias, split_axis=0)
-            if self.nranks > 1:
-                set_weight_attrs(
-                    self.bias,
-                    {
+            set_weight_attrs(
+                self.bias,
+                {
@@ -16,6 +16,7 @@

 from __future__ import annotations

+import re
 from functools import partial

 import paddle
@@ -314,7 +315,10 @@ class Qwen2ForCausalLM(ModelForCasualLM):
             weights_iterator (Iterator): An iterator yielding (name, weight) pairs.
         """

-        from fastdeploy.model_executor.models.utils import default_weight_loader
+        from fastdeploy.model_executor.utils import (
+            default_weight_loader,
+            process_weights_after_loading,
+        )

         stacked_params_mapping = [
             # (param_name, shard_name, shard_id)
@@ -328,6 +332,7 @@ class Qwen2ForCausalLM(ModelForCasualLM):
         ]

         params_dict = dict(self.named_parameters())
+        process_weights_after_loading_fn = process_weights_after_loading(dict(self.named_sublayers()))
         for loaded_weight_name, loaded_weight in weights_iterator:
             for param_name, weight_name, shard_id in stacked_params_mapping:
                 if weight_name not in loaded_weight_name:
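The diff shows only the call site: `process_weights_after_loading` takes the model's named sublayers and returns a callback invoked once per loaded parameter. A plausible shape for it, offered purely as a hypothetical reconstruction (the real helper lives in fastdeploy.model_executor.utils and may differ):

def process_weights_after_loading(sublayers):
    # Hypothetical reconstruction, not the actual FastDeploy helper.
    def fn(sublayer_name, param):
        layer = sublayers.get(sublayer_name)
        if layer is None:
            return
        quant_method = getattr(layer, "quant_method", None)
        if quant_method is not None:
            # e.g. repack the freshly loaded weight into the weight-only
            # int8 kernel layout once its shards are in place
            quant_method.process_weights_after_loading(layer)
    return fn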
@@ -340,11 +345,14 @@ class Qwen2ForCausalLM(ModelForCasualLM):
                 weight_loader(param, loaded_weight, shard_id)
                 break
             else:
-                if loaded_weight_name not in params_dict:
+                model_param_name = loaded_weight_name
+                if model_param_name not in params_dict:
                     continue
-                param = params_dict[loaded_weight_name]
+                param = params_dict[model_param_name]
                 weight_loader = getattr(param, "weight_loader", default_weight_loader(self.fd_config))
                 weight_loader(param, loaded_weight)
+                model_sublayer_name = re.sub(r"\.(weight)$", "", model_param_name)
+                process_weights_after_loading_fn(model_sublayer_name, param)

     @classmethod
     def name(self):
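This is why `import re` was added at the top of the file: stripping a trailing `.weight` maps each parameter name onto the name of the sublayer that owns it, which is the key `process_weights_after_loading_fn` expects. For example (parameter names are illustrative):

import re

for name in ("model.layers.0.self_attn.qkv_proj.weight",
             "model.layers.0.self_attn.qkv_proj.bias"):
    print(re.sub(r"\.(weight)$", "", name))
# model.layers.0.self_attn.qkv_proj
# model.layers.0.self_attn.qkv_proj.bias  (only ".weight" is stripped)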
@@ -99,6 +99,9 @@ model_param_map = {
         "tensor_parallel_size": 2,
         "quantizations": ["wint8"],
     },
+    "Qwen2-7B-Instruct": {
+        "quantizations": ["None", "wint8"],
+    },
 }

 params = []
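The new map entry adds Qwen2-7B-Instruct to the CI model matrix with two runs: "None" for the unquantized baseline and "wint8" for the weight-only int8 path this PR enables. The expansion loop is not shown in the diff; a sketch of how such a map typically fans out into test parameters (assumed structure, not the actual test code):

model_param_map = {
    "Qwen2-7B-Instruct": {"quantizations": ["None", "wint8"]},
}

params = []
for model, cfg in model_param_map.items():
    for quant in cfg["quantizations"]:
        params.append((model, cfg.get("tensor_parallel_size", 1), quant))
print(params)
# [('Qwen2-7B-Instruct', 1, 'None'), ('Qwen2-7B-Instruct', 1, 'wint8')]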