SUimeModelTraner/exported_models_test_new/inference_example.py

#!/usr/bin/env python3
"""
ONNX model inference example.

Shows how to run inference with the two exported ONNX models,
including a beam search decoding algorithm.
"""
import os
from typing import List, Tuple

import numpy as np
import onnxruntime as ort
import torch
import torch.nn.functional as F

class ONNXInference:
    """ONNX model inference wrapper."""

    def __init__(self, context_encoder_path, decoder_path):
        """
        Initialize the ONNX inference wrapper.

        Args:
            context_encoder_path: path to the context encoder ONNX model
            decoder_path: path to the decoder ONNX model
        """
        # Create ONNX Runtime sessions
        self.context_encoder_session = ort.InferenceSession(
            context_encoder_path,
            providers=['CPUExecutionProvider']  # or 'CUDAExecutionProvider'
        )
        self.decoder_session = ort.InferenceSession(
            decoder_path,
            providers=['CPUExecutionProvider']
        )
        # Collect input/output names
        self.context_input_names = [inp.name for inp in self.context_encoder_session.get_inputs()]
        self.context_output_names = [out.name for out in self.context_encoder_session.get_outputs()]
        self.decoder_input_names = [inp.name for inp in self.decoder_session.get_inputs()]
        self.decoder_output_names = [out.name for out in self.decoder_session.get_outputs()]
        print(f"Context encoder inputs: {self.context_input_names}")
        print(f"Context encoder outputs: {self.context_output_names}")
        print(f"Decoder inputs: {self.decoder_input_names}")
        print(f"Decoder outputs: {self.decoder_output_names}")

    def prepare_inputs(self, text_before, text_after, pinyin, slot_chars, tokenizer, query_engine, max_seq_len=128):
        """
        Prepare model inputs (kept consistent with the original inference script).

        Note: the actual text-to-token conversion has to be implemented here.
        To keep the example simple, the relevant preprocessing functions are
        assumed to exist elsewhere.

        Returns:
            input_ids, pinyin_ids, attention_mask, history_slot_ids
        """
        # Call the real preprocessing functions here.
        raise NotImplementedError("Implement the actual input preprocessing")
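
    # The helper below is a hypothetical sketch, not part of the original
    # script: it only illustrates the fixed-length padding/truncation that
    # prepare_inputs is expected to perform.  The actual id conversion via
    # tokenizer and query_engine must match the training pipeline.
    @staticmethod
    def _pad_to_length(ids, length, pad_id=0):
        """Pad or truncate a list of ids to a fixed length."""
        ids = list(ids)[:length]
        return ids + [pad_id] * (length - len(ids))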

    def run_context_encoder(self, input_ids, pinyin_ids, attention_mask):
        """
        Run the context encoder.

        Args:
            input_ids: [batch, seq_len]
            pinyin_ids: [batch, 24]
            attention_mask: [batch, seq_len]
        Returns:
            context_H, pinyin_P, context_mask, pinyin_mask
        """
        # Prepare inputs (accept either torch tensors or numpy arrays)
        inputs = {
            "input_ids": input_ids.numpy() if isinstance(input_ids, torch.Tensor) else input_ids,
            "pinyin_ids": pinyin_ids.numpy() if isinstance(pinyin_ids, torch.Tensor) else pinyin_ids,
            "attention_mask": attention_mask.numpy() if isinstance(attention_mask, torch.Tensor) else attention_mask,
        }
        # Run inference
        outputs = self.context_encoder_session.run(self.context_output_names, inputs)
        # Unpack outputs
        context_H, pinyin_P, context_mask, pinyin_mask = outputs
        return (
            torch.from_numpy(context_H),
            torch.from_numpy(pinyin_P),
            torch.from_numpy(context_mask),
            torch.from_numpy(pinyin_mask),
        )

    def run_decoder(self, context_H, pinyin_P, history_slot_ids, context_mask, pinyin_mask):
        """
        Run the decoder.

        Args:
            context_H: [batch, seq_len, 512]
            pinyin_P: [batch, 24, 512]
            history_slot_ids: [batch, 8]
            context_mask: [batch, seq_len]
            pinyin_mask: [batch, 24]
        Returns:
            logits: [batch, vocab_size]
        """
        # Prepare inputs (accept either torch tensors or numpy arrays)
        inputs = {
            "context_H": context_H.numpy() if isinstance(context_H, torch.Tensor) else context_H,
            "pinyin_P": pinyin_P.numpy() if isinstance(pinyin_P, torch.Tensor) else pinyin_P,
            "history_slot_ids": history_slot_ids.numpy() if isinstance(history_slot_ids, torch.Tensor) else history_slot_ids,
            "context_mask": context_mask.numpy() if isinstance(context_mask, torch.Tensor) else context_mask,
            "pinyin_mask": pinyin_mask.numpy() if isinstance(pinyin_mask, torch.Tensor) else pinyin_mask,
        }
        # Run inference
        outputs = self.decoder_session.run(self.decoder_output_names, inputs)
        # Unpack the single output
        logits = outputs[0]
        return torch.from_numpy(logits)

    def beam_search(self, context_H, pinyin_P, context_mask, pinyin_mask,
                    beam_size=5, max_length=10, vocab_size=10019):
        """
        Beam search decoding example.

        Args:
            context_H: context encoding
            pinyin_P: pinyin encoding
            context_mask: context mask
            pinyin_mask: pinyin mask
            beam_size: beam width
            max_length: maximum generated length
            vocab_size: vocabulary size
        Returns:
            list of the best sequences
        """
        # Initial beam: a single empty sequence with score 0
        beams = [([], 0.0)]  # (sequence, log probability)
        for step in range(max_length):
            new_beams = []
            for seq, score in beams:
                # Build history_slot_ids from the already decoded character ids
                if len(seq) < 8:
                    history = seq + [0] * (8 - len(seq))
                else:
                    history = seq[-8:]  # keep only the most recent 8
                history_tensor = torch.tensor([history], dtype=torch.long)
                # Run the decoder
                logits = self.run_decoder(
                    context_H, pinyin_P, history_tensor,
                    context_mask, pinyin_mask
                )
                # Convert logits to probabilities
                probs = F.softmax(logits[0], dim=-1)
                # Take the top-k candidates
                top_probs, top_indices = torch.topk(probs, beam_size)
                # Expand the beam
                for prob, idx in zip(top_probs, top_indices):
                    new_seq = seq + [idx.item()]
                    new_score = score + torch.log(prob).item()
                    new_beams.append((new_seq, new_score))
            # Prune: keep the beam_size best candidates
            new_beams.sort(key=lambda x: x[1], reverse=True)
            beams = new_beams[:beam_size]
            # Stop once every sequence has ended (end token id 0)
            all_ended = all(seq[-1] == 0 for seq, _ in beams if seq)
            if all_ended:
                break
        return beams
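
    @staticmethod
    def best_sequence(beams, end_id=0):
        """
        Illustrative helper, not part of the original script: pick the
        highest-scoring beam and drop everything from the first end token
        (assumed to be id 0, matching the termination check in beam_search),
        leaving ids the caller can map back to characters with the model
        vocabulary.
        """
        seq, _score = max(beams, key=lambda x: x[1])
        return seq[:seq.index(end_id)] if end_id in seq else seq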

    def predict_single(self, input_ids, pinyin_ids, attention_mask, history_slot_ids):
        """
        Single-step prediction.

        Args:
            input_ids: input token ids
            pinyin_ids: pinyin ids
            attention_mask: attention mask
            history_slot_ids: history slot ids
        Returns:
            prediction logits
        """
        # 1. Run the context encoder
        context_H, pinyin_P, context_mask, pinyin_mask = self.run_context_encoder(
            input_ids, pinyin_ids, attention_mask
        )
        # 2. Run the decoder
        logits = self.run_decoder(
            context_H, pinyin_P, history_slot_ids,
            context_mask, pinyin_mask
        )
        return logits
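
    def greedy_decode(self, input_ids, pinyin_ids, attention_mask, max_length=10, end_id=0):
        """
        Greedy decoding sketch, not part of the original script.

        It follows the same conventions assumed by beam_search above: an
        8-slot history padded with 0 and an end token with id 0.  The context
        encoder runs once; the decoder runs once per generated character,
        always taking the most probable next id.
        """
        context_H, pinyin_P, context_mask, pinyin_mask = self.run_context_encoder(
            input_ids, pinyin_ids, attention_mask
        )
        seq = []
        for _ in range(max_length):
            if len(seq) < 8:
                history = seq + [0] * (8 - len(seq))
            else:
                history = seq[-8:]
            history_tensor = torch.tensor([history], dtype=torch.long)
            logits = self.run_decoder(
                context_H, pinyin_P, history_tensor,
                context_mask, pinyin_mask
            )
            next_id = int(torch.argmax(logits[0]).item())
            seq.append(next_id)
            if next_id == end_id:
                break
        return seq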

def main():
    """Example entry point."""
    print("ONNX model inference example")
    print("=" * 60)
    # Initialize the inference wrapper
    context_encoder_path = "context_encoder.onnx"
    decoder_path = "decoder.onnx"
    if not os.path.exists(context_encoder_path) or not os.path.exists(decoder_path):
        print("Error: ONNX model files not found")
        print("Run export_onnx.py first to export the models")
        return
    inference = ONNXInference(context_encoder_path, decoder_path)
    print("✅ ONNX inference wrapper initialized")
    print("Use this example as a reference for the full IME inference pipeline")


if __name__ == "__main__":
    main()
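
# ---------------------------------------------------------------------------
# Usage sketch (not part of the original script).  The dummy shapes below
# follow the docstrings above: a batch of one, 128 context tokens, 24 pinyin
# ids, and an 8-slot history.  The int64 dtype is an assumption; feed the
# exported models whatever dtype they were exported with, and build real
# inputs with the same preprocessing used at training time.
#
#   inference = ONNXInference("context_encoder.onnx", "decoder.onnx")
#   input_ids = np.zeros((1, 128), dtype=np.int64)
#   pinyin_ids = np.zeros((1, 24), dtype=np.int64)
#   attention_mask = np.ones((1, 128), dtype=np.int64)
#   history_slot_ids = np.zeros((1, 8), dtype=np.int64)
#   logits = inference.predict_single(input_ids, pinyin_ids, attention_mask, history_slot_ids)
#   next_token_id = int(torch.argmax(logits[0]).item())
# ---------------------------------------------------------------------------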