class CPUAWQLinearMethod(LinearMethodBase):
"""Linear method for CPU AWQ.
Args:
quant_config: The CPU AWQ quantization config.
"""

    def __init__(self, quant_config: CPUAWQConfig) -> None:
        self.quant_config = quant_config
        # This backend only supports zero-point (asymmetric) AWQ checkpoints.
        assert self.quant_config.zero_point

    def create_weights(
self,
layer: torch.nn.Module,
input_size_per_partition: int,
output_partition_sizes: list[int],
input_size: int,
output_size: int,
params_dtype: torch.dtype,
**extra_weight_attrs,
) -> None:
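        # Allocate the AWQ parameters in checkpoint layout; repacking for the
        # CPU kernel happens later in process_weights_after_loading.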
del output_size
output_size_per_partition = sum(output_partition_sizes)
weight_loader = extra_weight_attrs.get("weight_loader")
        # Normalize group_size: -1 means a single quantization group that
        # spans the whole input dimension.
if self.quant_config.group_size != -1:
group_size = self.quant_config.group_size
else:
group_size = input_size
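        # AWQ packs `pack_factor` low-bit values into each int32 along the
        # output dimension, so the packed weight has
        # output_size_per_partition // pack_factor columns.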
qweight = PackedvLLMParameter(
data=torch.empty(
input_size_per_partition,
output_size_per_partition // self.quant_config.pack_factor,
dtype=torch.int32,
),
input_dim=0,
output_dim=1,
packed_dim=1,
packed_factor=self.quant_config.pack_factor,
weight_loader=weight_loader,
)
num_groups = input_size_per_partition // group_size
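        # Zero points are stored per quantization group and packed along the
        # output dimension in the same way as the weights.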
qzeros = PackedvLLMParameter(
data=torch.empty(
num_groups,
output_size_per_partition // self.quant_config.pack_factor,
dtype=torch.int32,
),
input_dim=0,
output_dim=1,
packed_dim=1,
packed_factor=self.quant_config.pack_factor,
weight_loader=weight_loader,
)
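        # Scales stay unpacked in the activation dtype, one row per group.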
scales = GroupQuantScaleParameter(
data=torch.empty(
num_groups,
output_size_per_partition,
dtype=params_dtype,
),
input_dim=0,
output_dim=1,
weight_loader=weight_loader,
)
layer.register_parameter("qweight", qweight)
layer.register_parameter("qzeros", qzeros)
layer.register_parameter("scales", scales)

    def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
packed_weight = layer.qweight.data
packed_zeros = layer.qzeros.data
group_num = packed_zeros.size(0)
bits = self.quant_config.weight_bits
pack_factor = int(self.quant_config.pack_factor)
input_size, packed_output_size = packed_weight.size()
output_size = packed_output_size * pack_factor
isa_hint = _get_isa_hint(layer.scales.dtype)
layer.isa_hint = isa_hint
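        # AWQ stores the packed nibbles within each int32 in an interleaved
        # order; indexing with this map restores sequential column order.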
interleave_map = (0, 4, 1, 5, 2, 6, 3, 7)
weight = unpack_cols(
packed_weight,
bits,
input_size,
output_size,
)
zeros = unpack_cols(
packed_zeros,
bits,
group_num,
output_size,
)
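        # De-interleave: view each packed group of pack_factor columns and
        # reorder them back to their logical positions.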
weight = (
weight.view(input_size, -1, pack_factor)[:, :, interleave_map]
.reshape(input_size, output_size)
.contiguous()
)
zeros = (
zeros.view(group_num, -1, pack_factor)[:, :, interleave_map]
.reshape(group_num, output_size)
.contiguous()
)
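        # Re-pack the de-interleaved zero points into int32 columns.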
zeros = pack_cols(zeros, bits, group_num, output_size).contiguous()
        # Group every 16 output channels into a block and transpose so that
        # each block is contiguous in memory for the CPU kernel.
weight = pack_cols(weight, bits, input_size, output_size)
weight = (
weight.view(input_size, -1, 16 // pack_factor)
.permute(1, 0, 2)
.reshape(-1, input_size * 16 // pack_factor)
.contiguous()
)
layer.qweight.data = weight
layer.qzeros.data = zeros

    def apply(
self,
layer: torch.nn.Module,
x: torch.Tensor,
bias: torch.Tensor | None = None,
) -> torch.Tensor:
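        # Run the weight-only quantized GEMM on CPU. g_idx is None because
        # AWQ does not permute input channels (no act-order).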
x = cpu_gemm_wna16(
input=x,
q_weight=layer.qweight,
scales=layer.scales,
zeros=layer.qzeros,
g_idx=None,
bias=bias,
            pack_factor=int(self.quant_config.pack_factor),
isa_hint=layer.isa_hint,
)
return x
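

# Minimal sanity-check sketch (not part of the class above): it replays the
# de-interleave step from process_weights_after_loading on a made-up row,
# assuming pack_factor == 8. `_demo_deinterleave` and its tensor values are
# hypothetical, for illustration only.
def _demo_deinterleave() -> None:
    import torch

    pack_factor = 8
    interleave_map = (0, 4, 1, 5, 2, 6, 3, 7)
    # Logical columns 0..7 as AWQ stores them, interleaved within one pack.
    interleaved = torch.tensor([[0, 2, 4, 6, 1, 3, 5, 7]])
    restored = interleaved.view(1, -1, pack_factor)[:, :, interleave_map]
    assert restored.reshape(1, -1).tolist() == [[0, 1, 2, 3, 4, 5, 6, 7]]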