Ссылка на блокнот Google Colab: https://colab.research.google.com/drive/16VDTTcCGsQrpURZXCxL5695jZ5Xxze4O?usp=sharing (скриншот: https://i.sstatic.net/GswZOriQ.jpg)
Код: Выделить всё
def ctc_loss(y_true, y_pred):
    """
    Compute the CTC loss between true labels and predicted outputs.

    Args:
        y_true: Ground-truth label sequences (any integer dtype), padded
            with -1 — TODO confirm the padding value matches the data pipeline.
        y_pred: Model predictions (logits), batch-major:
            presumably shaped (batch, time, num_classes) — verify against the model.

    Returns:
        Mean CTC loss across the batch (scalar tensor).
    """
    # tf.nn.ctc_loss requires integer labels.
    labels = tf.cast(y_true, dtype=tf.int32)

    batch_size = tf.shape(y_pred)[0]
    time_steps = tf.shape(y_pred)[1]
    # Every sample uses the full time dimension of the logits.
    logit_lengths = tf.fill([batch_size], time_steps)
    # Per-sample label length = number of non-padding (!= -1) entries.
    label_lengths = tf.reduce_sum(tf.cast(labels != -1, tf.int32), axis=1)

    per_sample_loss = tf.nn.ctc_loss(
        labels=labels,
        logits=y_pred,
        label_length=label_lengths,
        logit_length=logit_lengths,
        blank_index=-1,  # blank token is the last class
        logits_time_major=False,  # logits are (batch, time, classes)
    )
    return tf.reduce_mean(per_sample_loss)
# Optimizer with a learning-rate schedule:
# start at 1e-3 and multiply the LR by 0.9 every 10,000 steps.
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate=1e-3,
    decay_steps=10000,
    decay_rate=0.9,
)
# clipnorm=1.0 caps the global gradient norm (helps stabilize CTC training).
optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule, clipnorm=1.0)
model.compile(optimizer=optimizer, loss=ctc_loss)

# Model training; `history` keeps per-epoch loss curves for plotting below.
history = model.fit(
    x_train,
    y_train,
    batch_size=32,
    epochs=10,
    validation_data=(x_val, y_val),
)
# Visualize training vs. validation loss per epoch.
import matplotlib.pyplot as plt

for key, curve_label in (("loss", "Training Loss"), ("val_loss", "Validation Loss")):
    plt.plot(history.history[key], label=curve_label)
plt.legend()
plt.title("Training and Validation Loss")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.show()
Подробнее здесь: https://stackoverflow.com/questions/792 ... l-training
Мобильная версия