Проблема с отправкой звука через пользовательский AudioDeviceModule с помощью WebRTC на C++

Программы на C++. Форум разработчиков
Ответить
Гость
 Проблема с отправкой звука через пользовательский AudioDeviceModule с помощью WebRTC на C++.

Сообщение Гость »

Я пытаюсь реализовать плагин WebRTC для Unreal Engine с использованием C++, и у меня возникли проблемы с отправкой звука через созданный мною Custom AudioDeviceModule.
Проблема довольно проста: я вижу, что соединение между узлами установлено, и вижу, как Unreal Engine захватывает звук и передаёт его в ADM через AudioTransport->RecordedDataIsAvailable(). Однако звук, переданный в AudioTransport, дальше никуда не уходит — ни другой узел, ни Wireshark не видят никакой передачи данных.
Вот мой код ADM:

Код: Выделить всё

// Constructs the custom audio device module and creates the capturer object
// that pulls audio from Unreal Engine and hands it to WebRTC's AudioTransport.
// NOTE(review): the template argument of MakeUnique was lost when this code
// was pasted (angle brackets stripped by the forum); it presumably reads
// MakeUnique<FSomeAudioCapturer>() -- restore the concrete capturer type.
FCustomAudioDeviceModule::FCustomAudioDeviceModule()
{
AudioCapturer = MakeUnique();
}

// Empty destructor: AudioCapturer is a TUniquePtr and releases itself.
FCustomAudioDeviceModule::~FCustomAudioDeviceModule()
{

}

// Report which audio backend this module represents. A fully custom ADM
// identifies itself as the dummy layer so that WebRTC never tries to drive
// a platform audio stack on its own.
int32_t FCustomAudioDeviceModule::ActiveAudioLayer(webrtc::AudioDeviceModule::AudioLayer* audioLayer) const
{
	*audioLayer = webrtc::AudioDeviceModule::AudioLayer::kDummyAudio;
	return 0;
}

// Full-duplex transportation of PCM audio: WebRTC hands us its AudioTransport
// here, and the Unreal-side capturer keeps that pointer so it can push
// recorded frames into WebRTC via RecordedDataIsAvailable().
int32_t FCustomAudioDeviceModule::RegisterAudioCallback(webrtc::AudioTransport* audioCallback)
{
	AudioCapturer->RegisterAudioTransport(audioCallback);
	return 0;
}

// Main initialization and termination
int32_t FCustomAudioDeviceModule::Init()
{
	// Match InitRecording(): only initialize the capturer when it is not
	// already initialized. WebRTC may call Init() and InitRecording() in
	// either order, and the old unconditional Init() re-initialized an
	// already-prepared capturer.
	if (!AudioCapturer->IsInitialized())
	{
		AudioCapturer->Init();
	}
	bIsInitialized = true;
	return 0;
}

int32_t FCustomAudioDeviceModule::Terminate()
{
	// NOTE(review): the capturer is left untouched here; if WebRTC can call
	// Terminate() while recording is active, consider stopping the capturer
	// as well -- confirm against the capturer's lifecycle.
	bIsInitialized = false;
	return 0;
}

bool FCustomAudioDeviceModule::Initialized() const
{
	return bIsInitialized;
}

// Device enumeration. This module exposes no selectable hardware devices:
// audio comes from Unreal Engine, not from an OS device list.
// NOTE(review): returning -1 (error) from the count/name queries may make
// callers that enumerate devices treat the ADM as faulty; verify WebRTC's
// callers tolerate this (a count of 0 may be the safer "no devices" answer).
int16_t FCustomAudioDeviceModule::PlayoutDevices()
{
return -1;
}

int16_t FCustomAudioDeviceModule::RecordingDevices()
{
return -1;
}

// Name lookup always fails: there are no enumerable devices to name.
int32_t FCustomAudioDeviceModule::PlayoutDeviceName(uint16_t index, char name[webrtc::kAdmMaxDeviceNameSize], char guid[webrtc::kAdmMaxGuidSize])
{
return -1;
}

int32_t FCustomAudioDeviceModule::RecordingDeviceName(uint16_t index, char name[webrtc::kAdmMaxDeviceNameSize],  char guid[webrtc::kAdmMaxGuidSize])
{
return -1;
}

// Device selection is meaningless for this module -- there is no playout
// hardware to pick -- so both overloads accept any device and report success.
int32_t FCustomAudioDeviceModule::SetPlayoutDevice(uint16_t index)
{
	return 0;
}

int32_t FCustomAudioDeviceModule::SetPlayoutDevice(WindowsDeviceType device)
{
	return 0;
}

// Audio transport initialization. Playout is deliberately unsupported:
// this ADM only sends audio captured from Unreal Engine.
int32_t FCustomAudioDeviceModule::PlayoutIsAvailable(bool* available)
{
LOG_MESSAGE("ADM was queried for PlayoutIsAvailable.");
*available = false;
return 0;
}

// Playout must never be initialized; reaching this indicates a caller bug.
int32_t FCustomAudioDeviceModule::InitPlayout()
{
LOG_ERROR("InitPlayout shouldn't have been called!");
return -1;
}

bool FCustomAudioDeviceModule::PlayoutIsInitialized() const
{
return false;
}

// Audio transport control. The playout path is unsupported, so the start and
// stop calls fail loudly.
// NOTE(review): WebRTC can call StopPlayout() during normal teardown even
// when playout never started; confirm the error log/return here is not
// triggered (and treated as a failure) on shutdown.
int32_t FCustomAudioDeviceModule::StartPlayout()
{
LOG_ERROR("StartPlayout shouldn't have been called!");
return -1;
}

int32_t FCustomAudioDeviceModule::StopPlayout()
{
LOG_ERROR("StopPlayout shouldn't have been called!");
return -1;
}

bool FCustomAudioDeviceModule::Playing() const
{
return false;
}

// Recording-device selection is a no-op: the "recording device" is always
// Unreal Engine's audio capture, so any requested index/type is accepted.
int32_t FCustomAudioDeviceModule::SetRecordingDevice(uint16_t index)
{
	return 0;
}

int32_t FCustomAudioDeviceModule::SetRecordingDevice(WindowsDeviceType device)
{
	return 0;
}

// Recording availability query.
// Recording is the whole point of this module -- InitRecording() and
// StartRecording() are implemented and succeed -- so report it as available.
// The previous '*available = false' contradicted the rest of the class and
// could make callers skip the recording path entirely.
int32_t FCustomAudioDeviceModule::RecordingIsAvailable(bool* available)
{
	LOG_MESSAGE("ADM was queried for RecordingIsAvailable.");
	*available = true;
	return 0;
}

// Lazily initialize the Unreal-side capturer. WebRTC may call this more than
// once; an already-initialized capturer is left untouched.
int32_t FCustomAudioDeviceModule::InitRecording()
{
	LOG_MESSAGE("ADM InitRecording");
	const bool bCapturerReady = AudioCapturer->IsInitialized();
	if (!bCapturerReady)
	{
		AudioCapturer->Init();
	}
	return 0;
}

// Recording is considered initialized exactly when the capturer is.
bool FCustomAudioDeviceModule::RecordingIsInitialized() const
{
	return AudioCapturer->IsInitialized();
}

// Begin pushing captured Unreal audio into WebRTC, then mark the module as
// recording so Recording() reflects the new state.
int32_t FCustomAudioDeviceModule::StartRecording()
{
	LOG_MESSAGE("ADM StartRecording");
	AudioCapturer->StartCapturing();
	bIsRecording = true;
	return 0;
}

// Stop the capturer first, then clear the recording flag.
int32_t FCustomAudioDeviceModule::StopRecording()
{
	LOG_MESSAGE("ADM StopRecording");
	AudioCapturer->StopCapturing();
	bIsRecording = false;
	return 0;
}

// Reflects only this module's flag, not the capturer's internal state.
bool FCustomAudioDeviceModule::Recording() const
{
	return bIsRecording;
}

// Audio mixer initialization. There is no real speaker or microphone device
// behind this module; these calls are stubs.
int32_t FCustomAudioDeviceModule::InitSpeaker()
{
return 0;
}

// NOTE(review): reports 'false' even though InitSpeaker() returns success --
// confirm callers do not require these two to agree.
bool FCustomAudioDeviceModule::SpeakerIsInitialized() const
{
return false;
}

int32_t FCustomAudioDeviceModule::InitMicrophone()
{
return 0;
}

// The virtual microphone (Unreal capture) is always considered ready.
bool FCustomAudioDeviceModule::MicrophoneIsInitialized() const
{
return true;
}

// Speaker volume control is not supported (there is no playout device), so
// every call reports unavailability or failure. Several of the original
// comments incorrectly said "Operation successful" next to 'return -1';
// they are corrected below.
int32_t FCustomAudioDeviceModule::SpeakerVolumeIsAvailable(bool* available) {
*available = false;  // Indicate that volume control is not available
return -1;  // Not supported
}

int32_t FCustomAudioDeviceModule::SetSpeakerVolume(uint32_t volume) {
return -1;  // Operation not supported
}

int32_t FCustomAudioDeviceModule::SpeakerVolume(uint32_t* volume) const {
return -1;  // Not supported; *volume is intentionally left unset
}

int32_t FCustomAudioDeviceModule::MaxSpeakerVolume(uint32_t* maxVolume) const {
return -1;  // Not supported; *maxVolume is intentionally left unset
}

int32_t FCustomAudioDeviceModule::MinSpeakerVolume(uint32_t* minVolume) const {
return -1;  // Not supported; *minVolume is intentionally left unset
}

// Microphone volume control is not implemented. The original versions of
// these queries returned success (0) while leaving their out-parameters
// untouched, so callers read uninitialized memory (undefined behavior).
// Every out-parameter is now set deterministically before returning.
int32_t FCustomAudioDeviceModule::MicrophoneVolumeIsAvailable(bool* available) {
	*available = false;  // No volume control is exposed (was left unset).
	return 0;
}

int32_t FCustomAudioDeviceModule::SetMicrophoneVolume(uint32_t volume) {
	return 0;  // Accepted but ignored -- there is no device volume to set.
}

int32_t FCustomAudioDeviceModule::MicrophoneVolume(uint32_t* volume) const {
	*volume = 0;  // Fixed level instead of leaving *volume uninitialized.
	return 0;
}

int32_t FCustomAudioDeviceModule::MaxMicrophoneVolume(uint32_t* maxVolume) const {
	*maxVolume = FWebRTCAudioCaptureComponent::MaxVolumeLevel;
	return 0;
}

int32_t FCustomAudioDeviceModule::MinMicrophoneVolume(uint32_t* minVolume) const {
	*minVolume = 0;  // Was left uninitialized.
	return 0;
}

// Mute control is not supported on either the speaker or microphone side;
// availability is reported as false and all operations fail with -1.
int32_t FCustomAudioDeviceModule::SpeakerMuteIsAvailable(bool* available) {
*available = false;   // Indicate that mute control is not available
return -1;
}

int32_t FCustomAudioDeviceModule::SetSpeakerMute(bool enable) {
return -1;
}

int32_t FCustomAudioDeviceModule::SpeakerMute(bool* enabled) const {
*enabled = false;  // Speaker is not muted
return -1;
}

int32_t FCustomAudioDeviceModule::MicrophoneMuteIsAvailable(bool* available) {
*available = false;  // Indicate that mute control is not available
return -1;
}

int32_t FCustomAudioDeviceModule::SetMicrophoneMute(bool enable) {
return -1;
}

int32_t FCustomAudioDeviceModule::MicrophoneMute(bool* enabled) const {
*enabled = false;  // Microphone is not muted
return -1;  // Not supported (was mislabeled "Operation successful")
}

// Stereo support: the (disabled) playout path reports stereo as unavailable
// and off, while the Unreal capture path always runs in stereo.
int32_t FCustomAudioDeviceModule::StereoPlayoutIsAvailable(bool* available) const
{
	*available = false;
	return 0;
}

int32_t FCustomAudioDeviceModule::SetStereoPlayout(bool enable)
{
	return 0;
}

int32_t FCustomAudioDeviceModule::StereoPlayout(bool* enabled) const
{
	*enabled = false;
	return 0;
}

int32_t FCustomAudioDeviceModule::StereoRecordingIsAvailable(bool* available) const
{
	*available = true;
	return 0;
}

int32_t FCustomAudioDeviceModule::SetStereoRecording(bool enable)
{
	return 0;
}

int32_t FCustomAudioDeviceModule::StereoRecording(bool* enabled) const
{
	*enabled = true;
	return 0;
}

// Playout delay: with no playout device there is nothing buffering outgoing
// audio, so the reported delay is always zero milliseconds.
int32_t FCustomAudioDeviceModule::PlayoutDelay(uint16_t* delayMS) const
{
	*delayMS = 0;
	return 0;
}
Вот логика создания PeerConnection:

Код: Выделить всё

void FWebRTCClient::CreatePeerConnection()
{
if (!PeerConnectionFactory)
{
CreatePeerConnectionFactory();
}

std::vector stunServers = {
"stun:stun.l.google.com:19302",
"stun:stun1.l.google.com:19302",
"stun:stun2.l.google.com:19302",
"stun:stun3.l.google.com:19302",
"stun:stun4.l.google.com:19302"
};

webrtc::PeerConnectionInterface::RTCConfiguration Config;

for (const auto& uri : stunServers) {
webrtc::PeerConnectionInterface::IceServer StunServer;
StunServer.uri = uri;
Config.servers.push_back(StunServer);
}

webrtc::RTCErrorOr Result = PeerConnectionFactory->CreatePeerConnectionOrError(Config, webrtc::PeerConnectionDependencies(this));
if (!Result.ok())
{
Delegate->OnWebRTCError(FString::Printf(TEXT("Failed to Create PeerConnectionObject: %s"), Result.error().message()));
return;
}

// move object's ownership to the class.
PeerConnection = Result.MoveValue();

// Create an audio source.  This might involve your custom audio capture logic.
cricket::AudioOptions AudioOptions;

rtc::scoped_refptr AudioSource = PeerConnectionFactory->CreateAudioSource(AudioOptions);
rtc::scoped_refptr AudioTrack = PeerConnectionFactory->CreateAudioTrack("audioLabel", AudioSource);

// Add the audio track to the peer connection
auto AddTrackResult = PeerConnection->AddTrack(AudioTrack, { "streamId" }); // "streamId" is arbitrary and used to identify the stream
if (!AddTrackResult.ok()) {
// Handle the error
Delegate->OnWebRTCError(FString::Printf(TEXT("Failed to add audio track: %s"), AddTrackResult.error().message()));
return;
}
}

void FWebRTCClient::CreatePeerConnectionFactory()
{
NetworkThread = rtc::Thread::CreateWithSocketServer();
NetworkThread->Start();

WorkerThread = rtc::Thread::Create();
WorkerThread->Start();

SignallingThread = rtc::Thread::Create();
SignallingThread->Start();

rtc::scoped_refptr AudioDeviceModule = new rtc::RefCountedObject();
rtc::scoped_refptr AudioProcessingModule = webrtc::AudioProcessingBuilder().Create();
{
webrtc::AudioProcessing::Config Config;
// Enabled multi channel audio capture/render
Config.pipeline.multi_channel_capture = true;
Config.pipeline.multi_channel_render = true;
Config.pipeline.maximum_internal_processing_rate = 48000;
// Turn off all other audio processing effects in UE's WebRTC. We want to stream audio from UE as pure as possible.
Config.pre_amplifier.enabled = false;
Config.high_pass_filter.enabled = false;
Config.echo_canceller.enabled = false;
Config.noise_suppression.enabled = false;
Config.transient_suppression.enabled = false;
Config.gain_controller1.enabled = false;
Config.gain_controller2.enabled = false;
#if !WEBRTC_5414
Config.voice_detection.enabled = false;
Config.residual_echo_detector.enabled = false;
Config.level_estimation.enabled = false;
#endif

// Apply the config.
AudioProcessingModule->ApplyConfig(Config);
}

auto AudioEncoderFactory = webrtc::CreateAudioEncoderFactory();
auto AudioDecoderFactory = webrtc::CreateAudioDecoderFactory();

PeerConnectionFactory = webrtc::CreatePeerConnectionFactory(
NetworkThread.get(),   // Network thread
WorkerThread.get(),    // Worker thread
SignallingThread.get(), // Signaling thread
AudioDeviceModule,     // Audio device module
AudioEncoderFactory,   // Audio Encoder Factory
AudioDecoderFactory,   // Audio Decoder Factory
nullptr,
nullptr,
nullptr,
AudioProcessingModule
);

checkf(PeerConnectionFactory, TEXT("Failed to create peer connection factory!"));
}
Я пытался понять, что делает WebRTC после вызова обратного вызова AudioTransport->RecordedDataIsAvailable(), но запутался в базе кода.
Я чувствую, что мне не хватает очень важной информации, и буду рад любой помощи, поскольку занимаюсь этой проблемой уже несколько дней.
Спасибо,

Подробнее здесь: https://stackoverflow.com/questions/782 ... ebrtc-in-c
Ответить

Быстрый ответ

Изменение регистра текста: 
Смайлики
:) :( :oops: :roll: :wink: :muza: :clever: :sorry: :angel: :read: *x)
Ещё смайлики…
   
К этому ответу прикреплено по крайней мере одно вложение.

Если вы не хотите добавлять вложения, оставьте поля пустыми.

Максимально разрешённый размер вложения: 15 МБ.

Вернуться в «C++»