Преобразование текста в речь (TTS) останавливает распознавание голоса во Flutter — Android

Форум для тех, кто программирует под Android
Ответить Пред. темаСлед. тема
Anonymous
 Преобразование текста в речь (TTS) останавливает распознавание голоса во Flutter

Сообщение Anonymous »

Команды распознавания голоса не работают, хотя по выводу в консоли видно, что распознавание активно.
Вот функции, которые я реализовал. Похоже, проблема в том, что распознавание голоса, запущенное во время работы TTS, перестаёт слушать. В качестве решения я пытаюсь останавливать распознавание, пока TTS говорит, и возобновлять его после завершения речи. Пожалуйста, помогите: я многое перепробовал, но решения так и не нашёл.
Вот мои функции:
// --- Services and controllers -------------------------------------------
final TextToSpeechHelper _ttsHelper = TextToSpeechHelper();
final SpeechRecognitionService _speechService = SpeechRecognitionService();
final ExamFirebaseService _examService = ExamFirebaseService();
final TextEditingController _answerController = TextEditingController();
// Key for the audio/record button widget — presumably used to locate or
// trigger it from code; TODO confirm against the build method.
final GlobalKey _audioButtonKey =
    GlobalKey();

// --- Lifecycle / speech flags -------------------------------------------
bool _isInitialized = false; // set once initSpeech() completes
bool _isDisposed = false; // set in dispose() so pending async callbacks bail
String _lastWords = ''; // last recognized utterance, lowercased
bool _isListening = false; // command recognizer currently active
bool _isSubmitting = false; // presumably guards double submission — confirm

ExamState _currentState = ExamState.ready;

// --- Navigation cursor ---------------------------------------------------
int _currentSectionIndex = 0;
int _currentQuestionIndex = -1; // -1 = still on the exam introduction
int _currentSubQuestionIndex = -1;

// Question list without formal data models (raw Firebase maps)
List sections = [];

// Map to store answers, keyed 'section_question_subQuestion' (see
// _saveCurrentAnswer)
Map _answers = {};
final Map _audioRecordings = {};

@override
void initState() {
  super.initState();
  // Kick off async setup; intentionally not awaited — initState must
  // remain synchronous.
  _initialize();
}

/// Bootstraps the screen: resolves the exam state, loads the questions,
/// then — after a short pause — speaks the available voice commands.
///
/// Rewritten from nested, unawaited `Future.delayed` callbacks to
/// sequential awaits: same timing, but errors now propagate to the
/// returned future instead of vanishing, and the flow reads top-down.
Future<void> _initialize() async {
  _determineExamState();
  await _loadExamQuestions(widget.examData['id']);

  // Short delay so widget build settles before any audio work starts.
  await Future.delayed(Duration(milliseconds: 300));
  if (!mounted || _isDisposed) return;

  //await _initSpeechAndStart();

  // Give speech recognition time to become ready before TTS speaks,
  // preventing TTS/STT overlap.
  await Future.delayed(Duration(seconds: 2));
  if (!mounted || _isDisposed) return;

  _speakVoiceCommands();
}

/// Announces, via TTS, the voice commands that apply to the current
/// exam state. States without a dedicated prompt say nothing.
void _speakVoiceCommands() {
  switch (_currentState) {
    case ExamState.ready:
      _speak(
          'Voice commands available. Say "I am ready for exam" to start the exam.');
      break;
    case ExamState.inProgress:
      // Adjacent string literals concatenate into one utterance.
      _speak('Voice commands available. Say "next question" to move to the next question. '
          'Say "previous question" to go back. Say "repeat question" to hear the current question again. '
          'Say "submit exam" to finish and submit your exam.');
      break;
    case ExamState.ended:
      _speak('Exam ended. Say "go to home" to return to the home page.');
      break;
    default:
      // Mirrors the original if/else chain: unknown states are silent.
      break;
  }
}

/// Shim for the countdown timer's onEnd callback.
///
/// The timer originally referenced a method that did not exist; this
/// simply delegates to [_submitExam], which performs the real work.
void _endExam() {
  if (mounted && !_isDisposed) {
    _submitExam();
  }
}

/// Derives [_currentState] by comparing the clock against the exam's
/// configured start/end window from the widget's exam data.
void _determineExamState() {
  final now = DateTime.now();
  final startsAt = convertFirebaseTimestampAndTimeString(
      widget.examData['examDateTime'], widget.examData['startTime']);
  final endsAt = convertFirebaseTimestampAndTimeString(
      widget.examData['examDateTime'], widget.examData['endTime']);

  // Check order matters: "before start" wins over "after end".
  _currentState = now.isBefore(startsAt)
      ? ExamState.ready
      : now.isAfter(endsAt)
          ? ExamState.ended
          : ExamState.inProgress;
}

/// (Re)initializes speech recognition and begins listening for commands.
///
/// Any in-progress TTS is stopped first so it does not hold the audio
/// pipeline while the recognizer starts. Return type tightened from the
/// untyped `Future` to `Future<void>` (Effective Dart: annotate return
/// types); callers that `await` it are unaffected.
Future<void> _initSpeechAndStart() async {
  if (!mounted || _isDisposed) return;

  // Stop any ongoing TTS before grabbing the microphone.
  await _ttsHelper.stop();

  await _speechService.initSpeech();

  // The widget may have gone away during the awaits above.
  if (!mounted || _isDisposed) return;

  _safeSetState(() {
    _isInitialized = true;
  });

  _startListening();
}

/// Fetches the exam's sections/questions from Firebase into [sections].
///
/// Fixes: the original called `.cast()` with no type argument (a cast to
/// `dynamic`, i.e. a no-op view) — replaced with a concrete `List.from`
/// copy; and it used bare `setState` without the `_isDisposed` guard that
/// every other async callback in this class applies via [_safeSetState].
/// Errors are logged and leave [sections] unchanged (best-effort load).
Future<void> _loadExamQuestions(String examId) async {
  try {
    final data = await _examService.getExamWithQuestions(examId);

    if (!mounted || _isDisposed) return;

    _safeSetState(() {
      // Default to an empty list when the payload has no sections.
      sections = List.from(data['sections'] ?? []);
    });
  } catch (e) {
    print('Error loading exam questions: $e');
  }
}

/// Starts (or restarts) the speech recognizer for voice commands.
///
/// Fixes: was `async void` (errors unobservable) — now `Future<void>`,
/// still assignable wherever a `void Function()` is expected; and it
/// called `setState` directly after two `await`s without re-checking
/// `mounted`, which could throw if the widget was disposed mid-await.
/// Retries after 2s on failure; a 5s health check asks the service to
/// restart listening if it never came up.
Future<void> _startListening() async {
  if (!mounted || _isDisposed || _isListening) return;

  try {
    await _speechService.stopListening(); // Ensure previous session stops
    await _speechService.startListening(onResult: _processResult);

    // Re-check liveness: the widget may have gone away during the awaits.
    if (!mounted || _isDisposed) return;

    _safeSetState(() {
      _isListening = true;
    });

    // Health check: if listening never became active, ask the service
    // to restart it.
    Future.delayed(Duration(seconds: 5), () {
      if (mounted && !_isDisposed && !_isListening) {
        _speechService.checkAndRestartListening(_startListening);
      }
    });
  } catch (e) {
    print('Error starting listening: $e');

    // Retry once the audio stack has had a moment to settle.
    Future.delayed(Duration(seconds: 2), () {
      if (mounted && !_isDisposed) {
        _startListening();
      }
    });
  }
}

/// Handles a recognized utterance and dispatches any voice command in it.
///
/// Fix: the original ran the entire command dispatch *inside* the
/// `setState` builder, so navigation, TTS, and handlers that themselves
/// call `setState` (e.g. [_nextQuestion], [_goToFirstQuestion]) executed
/// from within a state-mutation callback. Only the [_lastWords] update
/// belongs in `setState`; dispatch now happens afterwards, split into one
/// helper per exam state. Matching logic is unchanged.
void _processResult(SpeechRecognitionResult result) {
  if (!mounted || _isDisposed) return;

  final words = result.recognizedWords.toLowerCase();
  print('Voice input: ${result.recognizedWords}');

  _safeSetState(() {
    _lastWords = words;
  });

  if (_currentState == ExamState.ready) {
    _handleReadyCommands(words);
  } else if (_currentState == ExamState.inProgress) {
    _handleInProgressCommands(words);
  } else if (_currentState == ExamState.ended) {
    _handleEndedCommands(words);
  }
}

/// Commands available before the exam starts.
void _handleReadyCommands(String words) {
  if (words.contains('i am ready for exam') ||
      words.contains('start exam') ||
      words.contains('begin exam')) {
    _goToFirstQuestion();
  } else if (words.contains('help') || words.contains('commands')) {
    _speakVoiceCommands();
  }
}

/// Commands available while the exam is running.
void _handleInProgressCommands(String words) {
  // Note: `&&` binds tighter than `||`, matching the original grouping.
  if (words.contains('next') && words.contains('question') ||
      words.contains('go to next') ||
      words.contains('forward')) {
    _nextQuestion();
  } else if (words.contains('previous question') ||
      words.contains('go back') ||
      words.contains('back')) {
    _previousQuestion();
  } else if (words.contains('repeat question') ||
      words.contains('say again') ||
      words.contains('what was the question')) {
    _repeatCurrentQuestion();
  } else if (words.contains('submit exam') ||
      words.contains('finish exam') ||
      words.contains('end exam')) {
    _submitExam();
  } else if (words.contains('which question') ||
      words.contains('what question') ||
      words.contains('question number') ||
      words.contains('where am i')) {
    _announceCurrentPosition();
  } else if (words.contains('help') || words.contains('commands')) {
    _speakVoiceCommands();
  } else if (words.contains('start recording') ||
      words.contains('record answer')) {
    _startAnswerRecording();
  } else if (words.contains('stop recording')) {
    _stopAnswerRecording();
  }
}

/// Commands available after the exam has ended.
void _handleEndedCommands(String words) {
  if (words.contains('go to home') ||
      words.contains('home page') ||
      words.contains('go home')) {
    _navigateToHome();
  } else if (words.contains('help') || words.contains('commands')) {
    _speakVoiceCommands();
  }
}

/// Speaks the user's current location within the exam: section title,
/// question number, and sub-question number (with totals).
void _announceCurrentPosition() {
  if (sections.isEmpty) return;

  // Before the first question, the cursor sits on the introduction.
  if (_currentQuestionIndex == -1) {
    _speak('You are at the exam introduction.');
    return;
  }

  final section = sections[_currentSectionIndex];
  final questions = section['questions'];
  final subQuestions = questions[_currentQuestionIndex]['subQuestions'];

  _speak('You are in section ${section['title']}, '
      'question ${_currentQuestionIndex + 1} out of ${questions.length}, '
      'sub-question ${_currentSubQuestionIndex + 1} out of ${subQuestions.length}.');
}

/// Reads the current question title aloud, then (after 2s) the current
/// sub-question text and its mark value.
///
/// Fix: the original validated the section and question indices but never
/// the sub-question index, so a stale `_currentSubQuestionIndex` (e.g. -1
/// or past the end) threw a RangeError. Now guarded symmetrically.
void _repeatCurrentQuestion() {
  if (sections.isEmpty ||
      _currentQuestionIndex == -1 ||
      _currentSectionIndex >= sections.length ||
      _currentQuestionIndex >=
          sections[_currentSectionIndex]['questions'].length) {
    return;
  }

  final question =
      sections[_currentSectionIndex]['questions'][_currentQuestionIndex];
  final subQuestions = question['subQuestions'];

  // New bounds check for the sub-question cursor.
  if (_currentSubQuestionIndex < 0 ||
      _currentSubQuestionIndex >= subQuestions.length) {
    return;
  }
  final subQuestion = subQuestions[_currentSubQuestionIndex];

  _ttsHelper.stop();
  _speak('Question ${_currentQuestionIndex + 1}: ${question['title']}');
  // NOTE(review): this second _speak fires on a fixed delay; if the first
  // utterance runs long it may be cut off — confirm against TTS behavior.
  Future.delayed(Duration(milliseconds: 2000), () {
    _speak(
        'Sub-question ${_currentSubQuestionIndex + 1}: ${subQuestion['text']}. Worth ${subQuestion['marks']} marks.');
  });
}

// For voice-activated answer recording
bool _isRecordingAnswer = false; // true while dictating an answer
String _currentRecordedAnswer = ''; // dictated text accumulated so far

/// Switches from command listening to a dedicated answer-dictation
/// listener, announcing the change to the user first.
void _startAnswerRecording() {
  if (!mounted || _isDisposed) return;

  // NOTE(review): _speak stops the recognizer and then RESTARTS the
  // command listener once TTS finishes — that restart can race with the
  // dedicated answer listener started below. Verify the ordering.
  _speak(
      'Starting to record your answer. Speak clearly. Say "stop recording" when finished.');
  setState(() {
    _isRecordingAnswer = true;
    _currentRecordedAnswer = '';
  });

  // Stop the regular command listener and start a dedicated answer listener.
  // stopListening's future is intentionally not awaited; the 500 ms delay
  // below stands in for its completion.
  _speechService.stopListening();
  Future.delayed(Duration(milliseconds: 500), () {
    _speechService.startListening(onResult: _processAnswerRecording);
  });
}

/// Accumulates dictated text into the answer field until the user says
/// "stop recording".
void _processAnswerRecording(SpeechRecognitionResult result) {
  if (!mounted || _isDisposed || !_isRecordingAnswer) return;

  final spoken = result.recognizedWords;
  print('Voice input: $spoken');

  // The stop phrase ends dictation instead of being appended.
  if (spoken.toLowerCase().contains('stop recording')) {
    _stopAnswerRecording();
    return;
  }

  // Append this chunk to the running answer (space-separated).
  _currentRecordedAnswer = '$_currentRecordedAnswer $spoken';

  // Mirror it into the visible text field, trimming the leading space.
  _answerController.text = _currentRecordedAnswer.trim();

  // Persist immediately so nothing is lost on navigation.
  _saveCurrentAnswer();
}

/// Ends answer dictation and resumes normal command listening.
void _stopAnswerRecording() {
  if (!mounted || _isDisposed) return;

  _speak('Recording stopped. Your answer has been saved.');
  setState(() {
    _isRecordingAnswer = false;
  });

  // Stop the answer listener, then restart the command listener after a
  // short settle delay (stopListening's future is not awaited).
  _speechService.stopListening();
  Future.delayed(Duration(milliseconds: 500), () {
    _startListening();
  });
}

/// Persists the current text-field contents into [_answers], keyed by
/// the section/question/sub-question cursor.
void _saveCurrentAnswer() {
  // Nothing to save before the first question is shown.
  if (_currentQuestionIndex == -1 || _currentSubQuestionIndex == -1) return;

  // Key format: 'section_question_subQuestion', e.g. '0_2_1'.
  final answerKey = [
    _currentSectionIndex,
    _currentQuestionIndex,
    _currentSubQuestionIndex,
  ].join('_');

  _answers[answerKey] = _answerController.text;
}

/// Runs [setState] only while the widget is still mounted and not yet
/// disposed — prevents "setState() called after dispose()" errors from
/// the many delayed/async callbacks in this class.
void _safeSetState(VoidCallback fn) {
  if (mounted && !_isDisposed) {
    setState(fn);
  }
}

@override
void dispose() {
  // Flag first so any pending async callbacks bail out early.
  _isDisposed = true;
  // Fire-and-forget cleanup: dispose() is synchronous, so these futures
  // are intentionally not awaited.
  _speechService.stopListening();
  _ttsHelper.stop();
  _answerController.dispose();
  super.dispose();
}

/// Advances the cursor to the next sub-question, question, or section
/// (in that priority), saving the current answer first. On the very last
/// sub-question, asks for submit confirmation instead of advancing.
void _nextQuestion() {
  if (!mounted || _isDisposed) return;

  // Persist whatever is in the answer field before moving on.
  _saveCurrentAnswer();

  // Cut off any narration in progress.
  _ttsHelper.stop();
  if (_isLastQuestion()) {
    // Show confirmation dialog instead of auto-submitting.
    _showSubmitConfirmationDialog();
    return;
  }
  setState(() {
    // Bail if sections haven't loaded or the cursor is out of range.
    // (This `return` exits only the closure; setState still completes.)
    if (sections.isEmpty || _currentSectionIndex >= sections.length) {
      return;
    }

    // Normal case: cursor is on a valid question.
    if (_currentQuestionIndex >= 0 &&
        _currentQuestionIndex <
            sections[_currentSectionIndex]['questions'].length) {
      final currentQuestion =
          sections[_currentSectionIndex]['questions'][_currentQuestionIndex];

      if (_currentSubQuestionIndex <
          currentQuestion['subQuestions'].length - 1) {
        // More sub-questions remain: advance within this question.
        _currentSubQuestionIndex++;
      } else {
        // Sub-questions exhausted: move to the next question.
        _currentSubQuestionIndex = 0;
        if (_currentQuestionIndex <
            sections[_currentSectionIndex]['questions'].length - 1) {
          _currentQuestionIndex++;
        } else {
          // Questions exhausted: move to the next section.
          if (_currentSectionIndex < sections.length - 1) {
            _currentSectionIndex++;
            _currentQuestionIndex = 0;
          } else {
            // All sections completed.
            // NOTE(review): _isLastQuestion above should make this branch
            // unreachable; if it does fire, _submitExam runs from inside
            // a setState callback — confirm that is safe.
            _submitExam();
            return; // skip _loadCurrentAnswer below
          }
        }
      }
    } else if (_currentQuestionIndex == -1) {
      // Cursor was still on the introduction: jump to the first question.
      // NOTE(review): _goToFirstQuestion itself calls setState, so this
      // nests setState inside setState — verify.
      _goToFirstQuestion();
    }

    // Restore any previously saved answer for the new position.
    _loadCurrentAnswer();
  });
}

/// Whether the cursor sits on the final sub-question of the final
/// question of the final section (false while sections are unloaded).
bool _isLastQuestion() {
  if (sections.isEmpty) return false;

  final finalSection = sections.length - 1;
  if (_currentSectionIndex < finalSection) return false;

  final questions = sections[finalSection]['questions'];
  final finalQuestion = questions.length - 1;
  if (_currentQuestionIndex < finalQuestion) return false;

  final subQuestions = questions[finalQuestion]['subQuestions'];
  return _currentSubQuestionIndex >= subQuestions.length - 1;
}

/// Moves the cursor to the very first sub-question, restores any saved
/// answer, and then announces the new position and question aloud.
void _goToFirstQuestion() {
  if (!mounted || _isDisposed || sections.isEmpty) return;

  // Cut off any ongoing narration before moving.
  _ttsHelper.stop();

  setState(() {
    _currentSectionIndex = 0;
    _currentQuestionIndex = 0;
    _currentSubQuestionIndex = 0;

    // Restore a previously saved answer for this question, if any.
    _loadCurrentAnswer();
  });

  // Short delay so the stop() above settles before speaking again.
  Future.delayed(Duration(milliseconds: 500), () {
    // NOTE(review): both calls invoke _speak, and _repeatCurrentQuestion
    // begins with _ttsHelper.stop() — the second utterance may cut off
    // the first. Confirm intended sequencing.
    _announceCurrentPosition();
    _repeatCurrentQuestion();
  });
}

/// Speaks [text] via TTS, pausing speech recognition for the duration.
///
/// Recognition is stopped first so the recognizer does not transcribe the
/// app's own voice; it is re-initialized and restarted once speaking
/// finishes. Return type tightened from untyped `Future` to
/// `Future<void>` (callers that await it are unaffected).
Future<void> _speak(String text) async {
  if (!mounted || _isDisposed) return;

  // Stop voice recognition before speaking.
  await _speechService.stopListening();

  // Configure TTS for this utterance.
  // NOTE(review): presumably initTTS is idempotent; if flutter_tts backs
  // this helper, confirm awaitSpeakCompletion(true) is enabled there —
  // otherwise speak() below returns before the audio finishes, and the
  // recognizer restarts while TTS is still talking (the reported bug).
  await _ttsHelper.initTTS(
      language: "en-US", rate: 0.5, pitch: 1.0, volume: 1.0);

  // Speak and wait for completion.
  await _ttsHelper.speak(text);

  // Now that speaking is done, restart speech recognition.
  if (!_isDisposed && mounted) {
    await _initSpeechAndStart();
  }
}
< /code>
Я попытался останавливать TTS перед запуском распознавания голоса, изменив эту функцию следующим образом:
Future _speak(String text) async {
if (!mounted || _isDisposed) return;

// Stop voice recognition before speaking
await _speechService.stopListening();

// Initialize TTS
await _ttsHelper.initTTS(
language: "en-US", rate: 0.5, pitch: 1.0, volume: 1.0);

// Speak and wait for completion
await _ttsHelper.speak(text);

// Now that speaking is done, restart speech recognition
if (!_isDisposed && mounted) {
await _initSpeechAndStart();
}
}
< /code>
Пожалуйста, помогите мне найти способ правильного распознавания голоса. Спасибо!

Подробнее здесь: https://stackoverflow.com/questions/795 ... in-flutter
Реклама
Ответить Пред. темаСлед. тема

Быстрый ответ

Изменение регистра текста: 
Смайлики
:) :( :oops: :roll: :wink: :muza: :clever: :sorry: :angel: :read: *x)
Ещё смайлики…
   
К этому ответу прикреплено по крайней мере одно вложение.

Если вы не хотите добавлять вложения, оставьте поля пустыми.

Максимально разрешённый размер вложения: 15 МБ.

  • Похожие темы
    Ответы
    Просмотры
    Последнее сообщение

Вернуться в «Android»