Ошибка многопроцессорного сломанного канала (BrokenPipeError), когда я пытаюсь отправить данные графика из процесса Python

Программы на Python
Ответить Пред. темаСлед. тема
Гость
 Ошибка многопроцессорного сломанного канала, когда я пытаюсь отправить данные графика из процесса

Сообщение Гость »

Я создал приложение pyqt5 для проверки микрофонов. Для этой цели я использовал модули pydub и pyaudio. Я также отображаю данные микрофона с помощью matplotlib. У меня есть QDialog, который запускает эмиттер для связи с qdialog и многопроцессорную обработку для чтения из входного потока pyaudio. Когда в пользовательском интерфейсе я выбираю нормализацию звука микрофона, когда я не говорю, слышен шум. Также в этом случае через несколько минут приложение вылетает с этой ошибкой:
Traceback (most recent call last):
File "C:\Users\chris\Documents\My Projects\papinhio-player\src\python+\main-window\../..\python+\menu-1\manage-input-and-output-sound-devices\microphone-input-device-settings\microphone-input-device-setting.py", line 866, in run
self.to_emitter.send({"type":"plot_data","plot_data":[self.x_vals,self.y_vals],"normalized_value":normalized_value})
File "C:\Python\Lib\multiprocessing\connection.py", line 206, in send
self._send_bytes(_ForkingPickler.dumps(obj))
File "C:\Python\Lib\multiprocessing\connection.py", line 301, in _send_bytes
nwritten, err = ov.GetOverlappedResult(True)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
BrokenPipeError: [WinError 109] Η διοχέτευση έχει τερματιστεί

Process Manage_Input_Device_Child_Proc-1:
Traceback (most recent call last):
File "C:\Users\chris\Documents\My Projects\papinhio-player\src\python+\main-window\../..\python+\menu-1\manage-input-and-output-sound-devices\microphone-input-device-settings\microphone-input-device-setting.py", line 866, in run
self.to_emitter.send({"type":"plot_data","plot_data":[self.x_vals,self.y_vals],"normalized_value":normalized_value})
File "C:\Python\Lib\multiprocessing\connection.py", line 206, in send
self._send_bytes(_ForkingPickler.dumps(obj))
File "C:\Python\Lib\multiprocessing\connection.py", line 301, in _send_bytes
nwritten, err = ov.GetOverlappedResult(True)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
BrokenPipeError: [WinError 109] Η διοχέτευση έχει τερματιστεί

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
File "C:\Python\Lib\multiprocessing\process.py", line 314, in _bootstrap
self.run()
File "C:\Users\chris\Documents\My Projects\papinhio-player\src\python+\main-window\../..\python+\menu-1\manage-input-and-output-sound-devices\microphone-input-device-settings\microphone-input-device-setting.py", line 875, in run
self.to_emitter.send({"type":"error","error_message":error_message})
File "C:\Python\Lib\multiprocessing\connection.py", line 206, in send
self._send_bytes(_ForkingPickler.dumps(obj))
File "C:\Python\Lib\multiprocessing\connection.py", line 289, in _send_bytes
ov, err = _winapi.WriteFile(self._handle, buf, overlapped=True)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
BrokenPipeError: [WinError 232] Η διοχέτευση κλείνει

Process finished with exit code -1073741571 (0xC00000FD)

Ошибка связана с данными графика. Если я закомментирую эту строку: self.to_emitter.send({"type":"plot_data","plot_data":[self.x_vals,self.y_vals],"normalized_value":normalized_value}) — то сбоя приложения не происходит.
Относительный код:
class Manage_Input_Device_Emitter(QThread):
    """Bridge between the microphone worker process and the Qt GUI thread.

    Blocks on the process pipe in ``run()`` and re-emits every received
    message as the matching Qt signal, so connected slots execute in the
    GUI thread instead of the worker process.
    """

    # Signal declarations must succeed at class-creation time; wrapping them
    # in try/except (as the original did) would silently leave them undefined
    # and break every .connect() call later.
    plot_data_signal = pyqtSignal(list, float)
    save_finished = pyqtSignal()
    devices_settings = pyqtSignal(list, int, float, float, float, float, float)
    error_signal = pyqtSignal(str)

    def __init__(self, from_process: Pipe):
        """Store the receiving end of the pipe connected to the child process."""
        super().__init__()
        self.data_from_process = from_process

    def run(self):
        """Forward messages from the child process until the pipe closes.

        An ``EOFError`` from ``recv()`` means the worker process exited and
        closed its end of the pipe — that is a normal shutdown, not an error,
        so the thread simply finishes instead of emitting ``error_signal``.
        """
        try:
            while True:
                data = self.data_from_process.recv()
                if data["type"] == "plot_data":
                    self.plot_data_signal.emit(data["plot_data"],
                                               data["normalized_value"])
                elif data["type"] == "save_finished":
                    self.save_finished.emit()
                elif data["type"] == "available_devices":
                    # NOTE(review): these two keys really do contain spaces in
                    # the sender's payload — keep them byte-identical.
                    self.devices_settings.emit(data["devices"],
                                               data["device_index"],
                                               data["volume"],
                                               data["is_normalized"],
                                               data["pan"],
                                               data["low frequency"],
                                               data["high frequency"])
                elif data["type"] == "error":
                    self.error_signal.emit(data["error_message"])
        except EOFError:
            # Child closed its end of the pipe: clean shutdown of this thread.
            pass
        except Exception:
            self.error_signal.emit(traceback.format_exc())

class Manage_Input_Device_Child_Proc(Process):
    """Worker process: reads microphone audio, applies effects, plays it back
    and streams plot data to the GUI through a multiprocessing pipe.

    Commands arrive on ``data_from_mother`` (a multiprocessing queue);
    results and errors leave through ``to_emitter`` (a pipe connection read
    by Manage_Input_Device_Emitter in the GUI process).
    """

    def __init__(self, to_emitter, from_mother):
        try:
            super().__init__()
            self.daemon = False
            self.to_emitter = to_emitter          # pipe end towards the emitter QThread
            self.data_from_mother = from_mother   # queue of GUI commands
            # local argument(s): save
        except Exception:
            # Constructor must never raise in the parent process; best-effort
            # error report through the pipe instead.
            try:
                to_emitter.send({"type": "error",
                                 "error_message": str(traceback.format_exc())})
            except Exception:
                pass

    def _safe_send(self, message):
        """Send ``message`` through the pipe; return False if the peer is gone.

        FIX for the reported crash: the GUI side may close its end of the
        pipe first (dialog closed, app quitting), after which ``send()``
        raises BrokenPipeError ([WinError 109]/[WinError 232]).  The original
        code then tried to ``send()`` the error report through the same dead
        pipe and crashed the whole process.  Treat a broken pipe as a request
        to stop instead.
        """
        try:
            self.to_emitter.send(message)
            return True
        except (BrokenPipeError, OSError):
            self.process_terminated = True
            return False

    def run(self):
        """Main worker loop: handle GUI commands, capture/play/plot audio."""
        try:
            self.fetch_input_settings()

            # Stream/packet parameters.
            self.bit_rate = 128 * 1024        # 128 kb/sec
            self.packet_time = 125            # 125 msec
            # self.packet_time = 125 * 44100 / 32768
            self.packet_size = int(16384 / 4)
            # self.new_sample_rate = 32768
            self.new_sample_rate = 44100
            self.TIME_WINDOW = 3000           # ms of samples kept for the plot

            self.format = pyaudio.paInt16
            self.channels = 2

            self.input_stream = None
            self.output_stream = None
            self.play_status = "stopped"
            self.process_terminated = False

            while not self.process_terminated:
                # Block for a command while stopped; poll (non-blocking)
                # while playing so the audio loop keeps running.
                if self.play_status == "stopped":
                    data = self.data_from_mother.get()
                elif self.data_from_mother.qsize() > 0:
                    data = self.data_from_mother.get()
                else:
                    data = None

                if data is not None:
                    if data["type"] == "stop-process":
                        self.process_terminated = True
                        return 1
                    if data["type"] == "save":
                        self.save(data["device_name"],
                                  data["volume"],
                                  data["is_normalized"],
                                  data["pan"],
                                  data["low_frequency"],
                                  data["high_frequency"])
                        break
                    elif data["type"] == "test":
                        # Open playback stream on the configured output device.
                        self.output_stream = self.p.open(
                            format=pyaudio.paInt16,
                            channels=self.channels,
                            rate=self.new_sample_rate,
                            output=True,
                            output_device_index=self.output_device_index,
                            frames_per_buffer=self.packet_size)
                        self.output_stream.start_stream()

                        # Resolve the selected input device name to its index.
                        self.input_device_name = data["content"]
                        for input_device in self.input_devices:
                            if data["content"] == input_device[2]:
                                self.input_device_index = input_device[1]

                        # Capture mono; it is duplicated to stereo below.
                        self.input_stream = self.p.open(
                            format=pyaudio.paInt16,
                            channels=1,
                            rate=self.new_sample_rate,
                            input=True,
                            input_device_index=self.input_device_index,
                            frames_per_buffer=self.packet_size)
                        self.input_stream.start_stream()
                        self.input_channels = 1

                        self.play_status = "playing"
                        self.chunk_number = 0
                        self.current_duration_milliseconds = 0
                        self.now = datetime.now()
                        self.x_vals = np.array([])
                        self.y_vals = np.array([])
                    elif data["type"] == "stop":
                        self.play_status = "stopped"
                        self.chunk_number = 0
                        self.current_duration_milliseconds = 0
                        try:
                            self.output_stream.stop_stream()
                            self.output_stream.close()
                            self.input_stream.stop_stream()
                            self.input_stream.close()
                        except Exception:
                            pass  # streams may already be closed / never opened
                        self.now = datetime.now()
                        self.x_vals = np.array([])
                        self.y_vals = np.array([])
                    elif data["type"] == "volume":
                        self.volume = data["value_base_100"]
                    elif data["type"] == "is_normalized":
                        self.is_normalized = data["boolean_value"]
                    elif data["type"] == "pan":
                        self.pan = data["pan_value"]
                    elif data["type"] == "low_frequency":
                        self.low_frequency = data["low_frequency_value"]
                    elif data["type"] == "high frequency":
                        # NOTE(review): sender uses a space in this key (unlike
                        # "low_frequency") — kept byte-identical on purpose.
                        self.high_frequency = data["high_frequency_value"]

                if self.play_status == "playing":
                    in_data = self.input_stream.read(self.packet_size,
                                                     exception_on_overflow=False)

                    # Wrap raw capture in an AudioSegment; duplicate mono to
                    # stereo so panning and playback always see 2 channels.
                    if self.input_channels == 2:
                        segment = AudioSegment(in_data, sample_width=2,
                                               frame_rate=self.new_sample_rate,
                                               channels=2)
                    else:
                        mono = AudioSegment(in_data, sample_width=2,
                                            frame_rate=self.new_sample_rate,
                                            channels=1)
                        segment = AudioSegment.from_mono_audiosegments(mono, mono)

                    # Effect chain: pan, band-pass, volume, normalize.
                    if self.pan != 0:
                        segment = segment.pan(self.pan / 100)
                    if self.low_frequency > 20:
                        segment = effects.high_pass_filter(segment, self.low_frequency)
                    if self.high_frequency > 20000:
                        segment = effects.low_pass_filter(segment, self.high_frequency)
                    db_volume = -200 if self.volume == 0 else 20 * math.log10(self.volume / 100)
                    segment = segment + db_volume
                    if self.is_normalized:
                        segment = self.normalize_method(segment, 0.1)
                    self.output_stream.write(segment.raw_data)

                    # Keep the playback buffer topped up with silence so it
                    # never underruns between chunks.
                    free = self.output_stream.get_write_available()
                    if free > self.packet_size:
                        tofill = free - self.packet_size
                        self.output_stream.write(chr(0) * tofill * self.channels * 2)

                    # Down-sample (x16) each channel and interleave for the plot.
                    chunk_time = len(segment)
                    samples = segment.get_array_of_samples()
                    left_audio_data = np.frombuffer(samples[::2], np.int16)[::16]
                    right_audio_data = np.frombuffer(samples[1::2], np.int16)[::16]
                    audio_data = np.vstack((left_audio_data, right_audio_data)).ravel('F')

                    # Matching timestamps.  Build a Python list first: the old
                    # per-sample np.append was quadratic.
                    step = timedelta(milliseconds=chunk_time / len(audio_data))
                    stamps = []
                    for _ in range(len(audio_data)):
                        stamps.append(self.now)
                        self.now = self.now + step
                    time_data = np.array(stamps)

                    self.x_vals = np.concatenate((self.x_vals, time_data))
                    self.y_vals = np.concatenate((self.y_vals, audio_data))

                    # Keep only TIME_WINDOW milliseconds worth of points.
                    if self.x_vals.size > audio_data.size * (self.TIME_WINDOW / chunk_time):
                        self.x_vals = self.x_vals[audio_data.size:]
                        self.y_vals = self.y_vals[audio_data.size:]

                    # Peak level of this chunk, scaled to [0, 1] for the meter.
                    normalized_value = abs(segment.max) / segment.max_possible_amplitude
                    if normalized_value > 1:
                        normalized_value = 1
                    if self.play_status == "stopped":
                        normalized_value = 0

                    # Guarded send — stop the loop if the GUI side is gone
                    # (this was the unguarded line that crashed the process).
                    if not self._safe_send({"type": "plot_data",
                                            "plot_data": [self.x_vals, self.y_vals],
                                            "normalized_value": normalized_value}):
                        break

                    self.now = datetime.now()
                    self.chunk_number += 1
                    self.current_duration_milliseconds += chunk_time
        except Exception:
            error_message = str(traceback.format_exc())
            print(error_message)
            # Guarded: the original re-raised BrokenPipeError here and died
            # with a second traceback ([WinError 232]).
            self._safe_send({"type": "error", "error_message": error_message})

    def normalize_method(self, seg, headroom):
        """Boost ``seg`` so its peak sits ``headroom`` dB below full scale.

        A silent segment (peak 0) is returned unchanged — it cannot be
        normalized and would divide by zero.
        """
        try:
            peak_sample_val = seg.max
            if peak_sample_val == 0:
                return seg
            target_peak = seg.max_possible_amplitude * utils.db_to_float(-headroom)
            needed_boost = utils.ratio_to_db(target_peak / peak_sample_val)
            return seg.apply_gain(needed_boost)
        except Exception:
            self._safe_send({"type": "error",
                             "error_message": traceback.format_exc()})
            return seg


Подробнее здесь: https://stackoverflow.com/questions/781 ... from-proce
Реклама
Ответить Пред. темаСлед. тема

Быстрый ответ

Изменение регистра текста: 
Смайлики
:) :( :oops: :roll: :wink: :muza: :clever: :sorry: :angel: :read: *x)
Ещё смайлики…
   
К этому ответу прикреплено по крайней мере одно вложение.

Если вы не хотите добавлять вложения, оставьте поля пустыми.

Максимально разрешённый размер вложения: 15 МБ.

  • Похожие темы
    Ответы
    Просмотры
    Последнее сообщение

Вернуться в «Python»