Мне нужно воспроизвести звук через источник звука Unity (AudioSource), потому что я использую этот источник звука для пакета синхронизации губ (lip sync).
Я могу воспроизводить Neural Voice и даже создавать аудиоклипы локально. Но мне не удалось получить сгенерированный аудиоклип из локального хранилища во время выполнения и воспроизвести его в AudioSource.
Возможно ли это как для Desktop, так и для Oculus? Я использую Cognitive Services Speech SDK для Unity.
Код: Выделить всё
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
//
//
using System;
using System.Threading;
using UnityEngine;
using UnityEngine.UI;
using Microsoft.CognitiveServices.Speech;
public class HelloWorld : MonoBehaviour
{
// Hook up the three properties below with a Text, InputField and Button object in your UI.
public Text outputText;
public InputField inputField;
public Button speakButton;
public AudioSource audioSource;
// Replace with your own subscription key and service region (e.g., "westus").
private const string SubscriptionKey = "YourSubscriptionKey";
private const string Region = "YourServiceRegion";
private const int SampleRate = 24000;
private object threadLocker = new object();
private bool waitingForSpeak;
private bool audioSourceNeedStop;
private string message;
private SpeechConfig speechConfig;
private SpeechSynthesizer synthesizer;
public void ButtonClick()
{
lock (threadLocker)
{
waitingForSpeak = true;
}
string newMessage = null;
var startTime = DateTime.Now;
// Starts speech synthesis, and returns once the synthesis is started.
using (var result = synthesizer.StartSpeakingTextAsync(inputField.text).Result)
{
// Native playback is not supported on Unity yet (currently only supported on Windows/Linux Desktop).
// Use the Unity API to play audio here as a short term solution.
// Native playback support will be added in the future release.
var audioDataStream = AudioDataStream.FromResult(result);
var isFirstAudioChunk = true;
var audioClip = AudioClip.Create(
"Speech",
SampleRate * 600, // Can speak 10mins audio as maximum
1,
SampleRate,
true,
(float[] audioChunk) =>
{
var chunkSize = audioChunk.Length;
var audioChunkBytes = new byte[chunkSize * 2];
var readBytes = audioDataStream.ReadData(audioChunkBytes);
if (isFirstAudioChunk && readBytes > 0)
{
var endTime = DateTime.Now;
var latency = endTime.Subtract(startTime).TotalMilliseconds;
newMessage = $"Speech synthesis succeeded!\nLatency: {latency} ms.";
isFirstAudioChunk = false;
}
for (int i = 0; i < chunkSize; ++i)
{
if (i < readBytes / 2)
{
audioChunk[i] = (short)(audioChunkBytes[i * 2 + 1]
Подробнее здесь: [url]https://stackoverflow.com/questions/79167470/how-to-play-neural-voice-to-unity-audio-source-in-unity-microsoft-cognitive-se[/url]
Мобильная версия