Я пытаюсь обучить модель CNN с изображениями размером 256*256. Я буду использовать эту модель для своей сверточной нейронной сети, которая будет использоваться для работы со старыми художественными изображениями.
У меня также есть разное количество изображений в этих файлах (в наборе данных train и наборе данных val)
это код:
import os
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
model = Sequential()
# Add layers
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', input_shape=(256, 256, 3)))
#filtesr: (int) the number of filters in the convolution.
#kernel_size: (int or two intager numbers) specifying the size of onvolution window.
#activation: activation function, if None no actevation function done.
#input_shape: if data_format='channel_last' A 4D tensor with shape: (batch_size, height, width, channels)
model.add(MaxPooling2D(pool_size=(2, 2)))
#pooling_size: (int or two intager nubmbers) factors by which to downscale (dim1, dim2).
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
#Flattens the input. Does not affect the batch size.
model.add(Dense(units=256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(units=10, activation='softmax')) # Adjust output units for the number of classes
# Compile the model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
#optimizer: String (name of optimizer) or optimizer instance.
#loss: (string) name of loss function
#metrics: List of metrics to be evaluated by the model during training and testing.
# Define paths
train_dir = 'C:/Users/M-LY/Desktop/dataset/train'
val_dir = 'C:/Users/M-LY/Desktop/dataset/val'
# Create ImageDataGenerator for data augmentation and rescaling
train_datagen = ImageDataGenerator(rescale=1./255) # Normalize pixel values to [0, 1]
#image data genorator for train
val_datagen = ImageDataGenerator(rescale=1./255)
# Load images from directories
train_generator = train_datagen.flow_from_directory(
train_dir, #path to train data
target_size=(256, 256), # Resize images to match model input
batch_size=32,
class_mode='binary') # Use 'categorical' for multi-class classification, and 'binary' for only two classes classification
val_generator = val_datagen.flow_from_directory(
val_dir,
target_size=(256, 256),
batch_size=32,
class_mode='binary')
# Train the model
history = model.fit(
train_generator,
steps_per_epoch=train_generator.samples // train_generator.batch_size, #train_generator.sample=440 #13
validation_data=val_generator,
validation_steps=val_generator.samples // val_generator.batch_size, #6
epochs=50) # Adjust the number of epochs as needed
test_dir = 'C:/Users/M-LY/Desktop/dataset/test'
test_datagen = ImageDataGenerator(rescale=1./255)
test_generator = test_datagen.flow_from_directory(
test_dir,
target_size=(256, 256),
batch_size=32,
class_mode='binary')
# Evaluate the model
test_loss, test_accuracy = model.evaluate(test_generator)
print(f"Test Accuracy: {test_accuracy * 100:.2f}%")
model.save('cnn_model.h5') # Save model to file
Found 440 images belonging to 2 classes.
Found 211 images belonging to 2 classes.
C:\Users\M-LY\bs\gradbb\.venv\Lib\site-packages\keras\src\trainers\data_adapters\py_dataset_adapter.py:121: UserWarning: Your `PyDataset` class should call `super().__init__(**kwargs)` in its constructor. `**kwargs` can include `workers`, `use_multiprocessing`, `max_queue_size`. Do not pass these arguments to `fit()`, as they will be ignored.
self._warn_if_super_not_called()
Epoch 1/50
Traceback (most recent call last):
File "c:\Users\M-LY\bs\gradbb\network.py", line 64, in
history = model.fit(
^^^^^^^^^^
File "C:\Users\M-LY\bs\gradbb\.venv\Lib\site-packages\keras\src\utils\traceback_utils.py", line 122, in error_handler
raise e.with_traceback(filtered_tb) from None
File "C:\Users\M-LY\bs\gradbb\.venv\Lib\site-packages\keras\src\backend\tensorflow\nn.py", line 623, in categorical_crossentropy
raise ValueError(
ValueError: Arguments `target` and `output` must have the same rank (ndim). Received: target.shape=(None,), output.shape=(None, 10)
Я не знаю, в чем проблема с функцией или она в другой части кода. Я новичок в тензорном потоке, и он мне нужен для дипломного проекта. ПОМОЩЬ
Я пытаюсь обучить модель CNN с изображениями размером 256*256. Я буду использовать эту модель для своей сверточной нейронной сети, которая будет использоваться для работы со старыми художественными изображениями. У меня также есть разное количество изображений в этих файлах (в наборе данных train и наборе данных val) это код: [code]import os import numpy as np import tensorflow as tf from tensorflow.keras.preprocessing.image import ImageDataGenerator from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
model = Sequential()
# Add layers model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', input_shape=(256, 256, 3))) #filtesr: (int) the number of filters in the convolution.
#kernel_size: (int or two intager numbers) specifying the size of onvolution window. #activation: activation function, if None no actevation function done. #input_shape: if data_format='channel_last' A 4D tensor with shape: (batch_size, height, width, channels)
model.add(MaxPooling2D(pool_size=(2, 2))) #pooling_size: (int or two intager nubmbers) factors by which to downscale (dim1, dim2).
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Flatten()) #Flattens the input. Does not affect the batch size.
model.add(Dense(units=256, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(units=10, activation='softmax')) # Adjust output units for the number of classes
# Compile the model model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) #optimizer: String (name of optimizer) or optimizer instance. #loss: (string) name of loss function #metrics: List of metrics to be evaluated by the model during training and testing.
# Create ImageDataGenerator for data augmentation and rescaling train_datagen = ImageDataGenerator(rescale=1./255) # Normalize pixel values to [0, 1]
#image data genorator for train val_datagen = ImageDataGenerator(rescale=1./255)
# Load images from directories train_generator = train_datagen.flow_from_directory( train_dir, #path to train data target_size=(256, 256), # Resize images to match model input batch_size=32, class_mode='binary') # Use 'categorical' for multi-class classification, and 'binary' for only two classes classification
# Train the model history = model.fit( train_generator, steps_per_epoch=train_generator.samples // train_generator.batch_size, #train_generator.sample=440 #13 validation_data=val_generator, validation_steps=val_generator.samples // val_generator.batch_size, #6 epochs=50) # Adjust the number of epochs as needed test_dir = 'C:/Users/M-LY/Desktop/dataset/test'
# Evaluate the model test_loss, test_accuracy = model.evaluate(test_generator) print(f"Test Accuracy: {test_accuracy * 100:.2f}%")
model.save('cnn_model.h5') # Save model to file
[/code] но продолжает выдавать эту ошибку: [code]Found 440 images belonging to 2 classes. Found 211 images belonging to 2 classes. C:\Users\M-LY\bs\gradbb\.venv\Lib\site-packages\keras\src\trainers\data_adapters\py_dataset_adapter.py:121: UserWarning: Your `PyDataset` class should call `super().__init__(**kwargs)` in its constructor. `**kwargs` can include `workers`, `use_multiprocessing`, `max_queue_size`. Do not pass these arguments to `fit()`, as they will be ignored. self._warn_if_super_not_called() Epoch 1/50 Traceback (most recent call last): File "c:\Users\M-LY\bs\gradbb\network.py", line 64, in history = model.fit( ^^^^^^^^^^ File "C:\Users\M-LY\bs\gradbb\.venv\Lib\site-packages\keras\src\utils\traceback_utils.py", line 122, in error_handler raise e.with_traceback(filtered_tb) from None File "C:\Users\M-LY\bs\gradbb\.venv\Lib\site-packages\keras\src\backend\tensorflow\nn.py", line 623, in categorical_crossentropy raise ValueError( ValueError: Arguments `target` and `output` must have the same rank (ndim). Received: target.shape=(None,), output.shape=(None, 10) [/code] Я не знаю, в чем проблема с функцией или она в другой части кода. Я новичок в тензорном потоке, и он мне нужен для дипломного проекта. ПОМОЩЬ