This commit is contained in:
2025-06-18 11:28:37 +08:00
commit 86d9daf90a
35 changed files with 1566 additions and 0 deletions

View File

@@ -0,0 +1,24 @@
import 'package:flutter/material.dart';
/// Displays the AI assistant's avatar image at a configurable size.
class AIAvatar extends StatelessWidget {
  /// Rendered width of the avatar, in logical pixels.
  final double width;

  /// Rendered height of the avatar, in logical pixels.
  final double height;

  const AIAvatar({super.key, this.width = 120, this.height = 140});

  @override
  Widget build(BuildContext context) {
    // The asset keeps its aspect ratio inside the fixed-size box.
    final avatar = Image.asset(
      'assets/images/avatar.png',
      fit: BoxFit.contain,
    );
    return SizedBox(width: width, height: height, child: avatar);
  }
}

View File

@@ -0,0 +1,32 @@
import 'package:flutter/material.dart';
import '../models/chat_message.dart';
/// Renders a single chat message as a rounded bubble.
///
/// User messages are right-aligned with a solid accent background; AI
/// messages are left-aligned with a translucent white background.
class ChatBubble extends StatelessWidget {
  /// The message to display.
  final ChatMessage message;

  const ChatBubble({super.key, required this.message});

  @override
  Widget build(BuildContext context) {
    return Align(
      alignment:
          message.isUser ? Alignment.centerRight : Alignment.centerLeft,
      child: Container(
        margin: const EdgeInsets.symmetric(vertical: 8),
        padding: const EdgeInsets.symmetric(horizontal: 16, vertical: 12),
        decoration: BoxDecoration(
          color: message.isUser
              ? const Color(0xFF6C63FF)
              : Colors.white.withOpacity(0.1),
          borderRadius: BorderRadius.circular(16),
        ),
        child: Text(
          message.text,
          // Fix: the original ternary returned Colors.white on BOTH
          // branches; collapsed to a compile-time const TextStyle.
          style: const TextStyle(
            color: Colors.white,
            fontSize: 16,
          ),
        ),
      ),
    );
  }
}

351
lib/widgets/chat_input.dart Normal file
View File

@@ -0,0 +1,351 @@
import 'dart:async';
import 'dart:io';
import 'package:flutter/material.dart';
import 'package:record/record.dart';
import 'package:http/http.dart' as http;
import 'package:http_parser/http_parser.dart';
import 'dart:typed_data';
import 'dart:convert';
import 'package:path_provider/path_provider.dart';
import 'dart:math';
/// Bottom input bar for the chat screen.
///
/// Exposes three callbacks to the parent widget:
///  * [onSendMessage] — invoked with text the user submits from the field.
///  * [onAIResponse] — invoked with text recognized from a voice recording.
///  * [onAIStreamResponse] — invoked repeatedly while the AI reply streams
///    in; the bool flag is `true` on the final, complete chunk.
class ChatInput extends StatefulWidget {
  final Function(String) onSendMessage;
  final Function(String) onAIResponse;
  final Function(String, bool) onAIStreamResponse;

  const ChatInput({
    super.key,
    required this.onSendMessage,
    required this.onAIResponse,
    required this.onAIStreamResponse,
  });

  @override
  State<ChatInput> createState() => _ChatInputState();
}
/// State for [ChatInput]: push-to-talk recording, speech-to-text upload,
/// and streaming the AI reply back over SSE.
class _ChatInputState extends State<ChatInput> {
  final TextEditingController _controller = TextEditingController();
  final AudioRecorder _recorder = AudioRecorder();

  // NOTE(review): written in _startRecording but never read — recording goes
  // through a temp file instead. Kept to avoid surprises; candidate for
  // removal once confirmed unused elsewhere.
  List<int> _audioBuffer = [];

  // Path of the temp file the recorder writes to.
  String? _tempFilePath;
  bool _isRecording = false;

  // Cached for the lifetime of the app process so the chat backend can keep
  // one conversation across multiple voice messages.
  static String? _cachedUserId;
  static String? _cachedConversationId;

  @override
  void initState() {
    super.initState();
    _checkPermission();
  }

  /// Logs the current microphone permission state (diagnostic only).
  Future<void> _checkPermission() async {
    final hasPermission = await _recorder.hasPermission();
    print('麦克风权限状态: $hasPermission');
  }

  @override
  void dispose() {
    _controller.dispose();
    _recorder.dispose();
    super.dispose();
  }

  /// Sends the typed text (if non-blank) via [ChatInput.onSendMessage] and
  /// clears the field.
  ///
  /// NOTE(review): currently unreferenced — build() wires no TextField to
  /// [_controller], so typed input has no entry point yet.
  void _handleSubmit() {
    final text = _controller.text;
    if (text.trim().isNotEmpty) {
      widget.onSendMessage(text);
      _controller.clear();
    }
  }

  /// Starts recording to a fresh temp file in OPUS format.
  Future<void> _startRecording() async {
    setState(() {
      _isRecording = true;
      _audioBuffer = [];
    });
    print('开始录音...');
    try {
      // File-based recording is more stable than streaming from the mic.
      final tempDir = await getTemporaryDirectory();
      _tempFilePath =
          '${tempDir.path}/temp_audio_${DateTime.now().millisecondsSinceEpoch}.opus';
      print('录音文件路径: $_tempFilePath');
      if (await _recorder.hasPermission()) {
        await _recorder.start(
          RecordConfig(encoder: AudioEncoder.opus),
          path: _tempFilePath!,
        );
        print('录音已开始使用OPUS格式文件: $_tempFilePath');
      } else {
        print('没有麦克风权限,无法开始录音');
        // Fix: nothing is recording, so don't leave the UI in the
        // "recording" state.
        setState(() {
          _isRecording = false;
        });
      }
    } catch (e) {
      print('录音开始出错: $e');
      setState(() {
        _isRecording = false;
      });
    }
  }

  /// Stops recording, uploads the audio for speech recognition, and forwards
  /// the recognized text to the streaming chat endpoint.
  Future<void> _stopAndSendRecording() async {
    if (!_isRecording) return;
    setState(() {
      _isRecording = false;
    });
    print('停止录音...');
    try {
      final path = await _recorder.stop();
      print('录音已停止,文件路径: $path');
      if (path != null) {
        final file = File(path);
        if (await file.exists()) {
          try {
            final bytes = await file.readAsBytes();
            print('读取到录音数据: ${bytes.length} 字节');
            if (bytes.isNotEmpty) {
              // Step 1: speech-to-text.
              final recognizedText = await _sendAudioToServer(bytes);
              if (recognizedText != null) {
                // Surface the recognized text as the user's message.
                widget.onAIResponse(recognizedText);
                // Step 2: stream the AI reply (fire-and-forget; it reports
                // progress through onAIStreamResponse).
                _sendTextToChatSSE(recognizedText);
              }
            } else {
              print('录音数据为空');
            }
          } finally {
            // Fix: the temp recording file used to be leaked on every
            // recording; delete it once its bytes are in memory.
            try {
              await file.delete();
            } catch (_) {
              // Best effort — a leftover temp file is not fatal.
            }
          }
        } else {
          print('录音文件不存在: $path');
        }
      } else {
        print('录音路径为空');
      }
    } catch (e) {
      print('停止录音或发送过程出错: $e');
    }
  }

  /// Uploads raw audio bytes to the voice-recognition endpoint and returns
  /// the recognized text, or null on any failure.
  ///
  /// NOTE(review): the recorder produces OPUS, yet the upload declares
  /// filename 'record.wav' and content type audio/wav — verify what the
  /// server actually expects before changing either side.
  Future<String?> _sendAudioToServer(List<int> audioBytes) async {
    try {
      print('准备发送OPUS音频数据大小: ${audioBytes.length} 字节');
      final uri = Uri.parse('http://143.64.185.20:18606/voice');
      print('发送到: $uri');
      final request = http.MultipartRequest('POST', uri);
      request.files.add(
        http.MultipartFile.fromBytes(
          'audio',
          audioBytes,
          filename: 'record.wav',
          contentType: MediaType('audio', 'wav'),
        ),
      );
      request.fields['lang'] = 'cn';
      print('发送请求...');
      final streamResponse = await request.send();
      print('收到响应,状态码: ${streamResponse.statusCode}');
      final response = await http.Response.fromStream(streamResponse);
      if (response.statusCode == 200) {
        print('响应内容: ${response.body}');
        final text = _parseTextFromJson(response.body);
        if (text != null) {
          print('解析出文本: $text');
          return text;
        } else {
          print('解析文本失败');
        }
      } else {
        print('请求失败,状态码: ${response.statusCode},响应: ${response.body}');
      }
    } catch (e) {
      print('发送录音到服务器时出错: $e');
    }
    return null;
  }

  /// Extracts the 'text' field from a JSON response body; null if absent or
  /// the body is not valid JSON.
  String? _parseTextFromJson(String body) {
    try {
      final decoded = jsonDecode(body);
      if (decoded.containsKey('text')) {
        return decoded['text'] as String?;
      }
      return null;
    } catch (e) {
      print('JSON解析错误: $e, 原始数据: $body');
      return null;
    }
  }

  /// POSTs [text] to the chat endpoint and relays the SSE reply stream to
  /// [ChatInput.onAIStreamResponse] (accumulated text, done flag).
  ///
  /// Fix: now returns Future<void> instead of bare void so callers can
  /// await or observe errors if they choose (fire-and-forget still works).
  Future<void> _sendTextToChatSSE(String text) async {
    print('将识别的文本发送到 Chat SSE 接口: $text');
    // Lazily create a per-run user id on first use.
    if (_cachedUserId == null) {
      _cachedUserId = _generateRandomUserId(6);
      print('初始化用户ID: $_cachedUserId');
    }
    // HttpClient is used (rather than package:http) for SSE streaming.
    final client = HttpClient();
    try {
      final chatUri = Uri.parse('http://143.64.185.20:18606/chat');
      final request = await client.postUrl(chatUri);
      request.headers.set('Content-Type', 'application/json');
      request.headers.set('Accept', 'text/event-stream');
      final body = {
        'message': text,
        'user': _cachedUserId,
      };
      // Reuse the server-assigned conversation id once we have one.
      if (_cachedConversationId != null) {
        body['conversation_id'] = _cachedConversationId;
        print('使用缓存的会话ID: $_cachedConversationId');
      } else {
        print('首次请求不使用会话ID');
      }
      request.add(utf8.encode(json.encode(body)));
      final response = await request.close();
      if (response.statusCode == 200) {
        print('SSE 连接成功,开始接收流数据');
        // Running total of the AI reply so far.
        String accumulatedResponse = '';
        // Fix: the original `break` only exited the inner line loop, so the
        // stream kept being consumed after [DONE]. This flag also stops the
        // outer await-for.
        var done = false;
        await for (final data in response.transform(utf8.decoder)) {
          // SSE frames look like "data: {...}\n\n".
          final lines = data.split('\n');
          for (var line in lines) {
            if (line.startsWith('data:')) {
              final jsonStr = line.substring(5).trim();
              if (jsonStr == '[DONE]') {
                print('SSE 流结束');
                // Deliver the final, complete reply.
                widget.onAIStreamResponse(accumulatedResponse, true);
                done = true;
                break;
              }
              try {
                final jsonData = json.decode(jsonStr);
                // Capture the conversation id the first time it appears.
                if (jsonData.containsKey('conversation_id') &&
                    _cachedConversationId == null) {
                  _cachedConversationId = jsonData['conversation_id'];
                  print('从响应中提取并缓存会话ID: $_cachedConversationId');
                }
                // Only 'message'-type events carry reply text.
                if (jsonData['event'].toString().contains('message')) {
                  final textChunk = jsonData.containsKey('answer')
                      ? jsonData['answer']
                      : '';
                  accumulatedResponse += textChunk;
                  widget.onAIStreamResponse(accumulatedResponse, false);
                }
              } catch (e) {
                print('解析 SSE 数据出错: $e, 原始数据: $jsonStr');
              }
            }
          }
          if (done) break;
        }
      } else {
        print('SSE 连接失败,状态码: ${response.statusCode}');
      }
    } catch (e) {
      print('SSE 连接出错: $e');
    } finally {
      // Fix: the HttpClient was never closed, leaking the connection.
      client.close(force: true);
    }
  }

  /// Returns a random alphanumeric id of [length] characters.
  String _generateRandomUserId(int length) {
    const chars =
        'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789';
    final random = Random();
    return String.fromCharCodes(
      Iterable.generate(
        length,
        (_) => chars.codeUnitAt(random.nextInt(chars.length)),
      ),
    );
  }

  // NOTE(review): this requests focus on throwaway FocusNodes attached to no
  // widget, so it almost certainly cannot summon the keyboard — and build()
  // contains no TextField for _controller / _handleSubmit to pair with.
  // Left byte-identical pending a real text-entry mode; confirm intended UX
  // before rewriting.
  void _showKeyboard() {
    FocusScope.of(context).requestFocus(FocusNode());
    // 给输入框焦点,触发键盘弹出
    Future.delayed(const Duration(milliseconds: 50), () {
      FocusScope.of(context).requestFocus(
        FocusNode()..requestFocus()
      );
    });
  }

  @override
  Widget build(BuildContext context) {
    return Container(
      padding: const EdgeInsets.all(8.0),
      color: Colors.transparent,
      child: Row(
        children: [
          Expanded(
            // Push-to-talk: hold to record, release to send.
            child: GestureDetector(
              onLongPress: _startRecording,
              onLongPressUp: _stopAndSendRecording,
              child: AnimatedContainer(
                duration: const Duration(milliseconds: 150),
                height: 48,
                alignment: Alignment.center,
                decoration: BoxDecoration(
                  // Brighter while recording as visual feedback.
                  color: _isRecording
                      ? Colors.white.withOpacity(0.25)
                      : Colors.white.withOpacity(0.1),
                  borderRadius: BorderRadius.circular(24),
                ),
                child: Text(
                  _isRecording ? '正在说话中...' : '按住说话',
                  style:
                      const TextStyle(color: Colors.white70, fontSize: 16),
                ),
              ),
            ),
          ),
          IconButton(
            icon: const Icon(Icons.keyboard, color: Colors.white),
            onPressed: _showKeyboard,
          ),
        ],
      ),
    );
  }
}

View File

@@ -0,0 +1,25 @@
import 'package:flutter/material.dart';
/// Paints the app's standard purple-to-black vertical gradient behind
/// [child].
class GradientBackground extends StatelessWidget {
  /// Content rendered on top of the gradient.
  final Widget child;

  const GradientBackground({super.key, required this.child});

  @override
  Widget build(BuildContext context) {
    // Three stops, top to bottom: deep purple fading to near-black.
    const decoration = BoxDecoration(
      gradient: LinearGradient(
        begin: Alignment.topCenter,
        end: Alignment.bottomCenter,
        colors: [
          Color(0xFF451663),
          Color(0xFF17042B),
          Color(0xFF0B021D),
        ],
      ),
    );
    return Container(decoration: decoration, child: child);
  }
}