By static training I mean working sample-wise along axis=0, so that the dynamic GAN approach can be applied in a periodic fashion, as in the training steps below, where the period length is 3.
The first sample uses a real sample, preventing backpropagation through the generator and allowing it through the discriminator.
The second sample uses a generated sample, preventing backpropagation through the generator and allowing it through the discriminator.
The third sample uses a generated sample, allowing backpropagation through the generator and preventing weight updates in the discriminator, but redirecting the gradient to the generator.
The fourth sample is basically like the first sample.
etc...
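To make this schedule concrete, here is a minimal sketch of how the per-sample control masks for a period of 3 could be generated (this is my own illustration; the array names mirror the mask inputs used further below, and the batch layout is an assumption):

[code]
import numpy as np

period = 3
batch_size = 9  # assumed to be a multiple of the period
phase = np.arange(batch_size) % period

# Step 0: real sample, discriminator update only.
# Step 1: generated sample, discriminator update only.
# Step 2: generated sample, generator update via redirected gradient.
forward_mask = (phase == 0).astype("float32").reshape(-1, 1)
backward_mask = (phase == 2).astype("float32").reshape(-1, 1)
backward_mask_inverse = 1.0 - backward_mask
[/code]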
To do this, I defined control flow layers; they consist of StopGradient, ControlForward, ControlBackward, and RedirectGradient.
#@title Control Flow Layers Definition
from keras import layers, ops

# keras_export is internal to Keras; fall back to a no-op decorator so this
# snippet also runs outside the Keras source tree.
try:
    from keras.src.api_export import keras_export
except ImportError:
    def keras_export(path):
        return lambda obj: obj
@keras_export("keras.layers.StopGradient")
class StopGradient(layers.Layer):
"""Layer to stop gradient flow during backward propagation.
In another word, only allowing forward propagation.
It takes a single tensor, and returns a single tensor (also of the same shape).
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def call(self, inputs):
return layers.Lambda(lambda x: ops.stop_gradient(x))(inputs)
def compute_output_shape(self, input_shape):
return input_shape
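# Usage (illustrative): y = StopGradient()(x) forwards x unchanged while
# blocking any gradient from flowing back through it.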
@keras_export("keras.layers.ControlForward")
class ControlForward(layers.Layer):
"""Layer to control which layer should be passed during forward propagation between two layers based on control mask.
It takes as input a list of tensors.
The first tensor is control mask with shape (None, 1), while the second and third tensors are candidates between true or false, respectively.
Both candidates must be the same.
Returns a single tensor (also of the same shape like candidate layer).
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def call(self, inputs):
mask, layer_if_true, layer_if_false = inputs
# Cast the mask to the same type like candidate layer
mask = layers.Lambda(lambda x: ops.cast(x, dtype=layer_if_true.dtype))(mask)
mask_h = layers.Lambda(lambda x: ops.logical_not(x))(mask)
# Apply the layer if true
layer_if_true = layers.Multiply()([layer_if_true, mask])
# Apply the layer if false
layer_if_false = layers.Multiply()([layer_if_false, mask_h])
return layers.Add()([layer_if_true, layer_if_false])
def compute_output_shape(self, input_shape):
mask_shape, layer_if_true_shape, layer_if_false_shape = input_shape
# Mask shape must be (None, 1)
if mask_shape != (None, 1):
raise ValueError("Control mask shape must be (None, 1), but got {}".format(mask_shape))
# Make sure both layers have the same shape
if layer_if_true_shape != layer_if_false_shape:
raise ValueError("Both candidates must be the same shape., but got {} and {}".format(layer_if_true_shape, layer_if_false_shape))
return layer_if_true_shape
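# Usage (illustrative): merged = ControlForward()([mask, real_batch, fake_batch])
# takes each row from real_batch where mask is 1 and from fake_batch where it is 0.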
@keras_export("keras.layers.ControlBackward")
class ControlBackward(layers.Layer):
"""Layer to control whether a layer should be passed during backward propagation based on control mask.
This is also called controlled stop gradient, except it contains a control mask.
The control mask is following the principle of OR logic.
Which means if one of them is true, then there must be weight changes in the previous layer.
It takes as input a list of tensors.
The first tensor is control mask with shape (None, 1), while the second is previous layer.
Returns a single tensor (also of the same shape like previous layer).
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def call(self, inputs):
mask, prev_layer = inputs
# Cast the mask to the same type as prev_layer
mask = layers.Lambda(lambda x: ops.cast(x, dtype=prev_layer.dtype))(mask)
mask_h = layers.Lambda(lambda x: ops.logical_not(x))(mask)
# Apply the stop_gradient function on the masked parts
stopped_gradient_part = layers.Lambda(lambda x: ops.stop_gradient(x))(prev_layer)
# Multiply stopped gradient part with mask_h
stopped_gradient_masked = layers.Multiply()([stopped_gradient_part, mask_h])
# Multiply normal (non-stopped) part with mask
non_stopped_gradient_part = layers.Multiply()([prev_layer, mask])
# Add the stopped and non-stopped parts
return layers.Add()([stopped_gradient_masked, non_stopped_gradient_part])
def compute_output_shape(self, input_shape):
mask_shape, prev_layer_shape = input_shape
# Mask shape must be (None, 1)
if mask_shape != (None, 1):
raise ValueError("Mask control shape must be (None, 1), but got {}".format(mask_shape))
return prev_layer_shape
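# Usage (illustrative): out = ControlBackward()([mask, features]) passes
# features through unchanged, but backpropagates only for rows where mask is 1.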
@keras_export("keras.layers.RedirectGradient")
class RedirectGradient(layers.Layer):
"""
Layer to allow forward propagation through one layer and redirect gradients to another layer.
During forward propagation, the inputs pass through the forward layer.
During backward propagation, the gradients are redirected to the backward layer.
This acts like a gradient flow jumper.
It takes as input a list of tensors.
The first tensor is the forward layer, while the second is the backward layer.
Returns a single tensor (also of the same shape like forward layer).
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def call(self, inputs):
forward_layer, backward_layer = inputs
# Forward pass: pass the inputs through the forward layer
forward_pass = forward_layer
# Stop gradient for the forward layer (forward_layer should not propagate gradients)
forward_stopped = layers.Lambda(lambda x: ops.stop_gradient(x))(forward_pass)
# Gradients are applied to the backward_layer
# This ensures that only backward_layer gets the gradients during backpropagation
backward_gradients = backward_layer
# Combine forward propagation and backward gradient redirection
return layers.Add()([forward_stopped, backward_gradients])
def compute_output_shape(self, input_shape):
forward_layer_shape, _ = input_shape
return forward_layer_shape
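As a quick sanity check of the layers above, here is a small sketch (my own illustration; the gradient check assumes the TensorFlow backend):

[code]
import tensorflow as tf  # assumed backend; only needed for the gradient check

x = tf.ones((4, 2))
y = tf.zeros((4, 2))
mask = tf.constant([[1.0], [0.0], [1.0], [0.0]])

# Forward selection: rows 0 and 2 come from x, rows 1 and 3 from y.
print(ControlForward()([mask, x, y]))

# Gradient redirection: the output value equals x, but the gradient lands on y.
with tf.GradientTape() as tape:
    tape.watch([x, y])
    out = RedirectGradient()([x, y])
    loss = tf.reduce_sum(out)
grad_x, grad_y = tape.gradient(loss, [x, y])
print(grad_x)  # None: x receives no gradient
print(grad_y)  # all ones: y receives the redirected gradient
[/code]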
Therefore, I can define the static GAN as follows:

[code]
'''
forward_mask: allow real sample if 1, else generated sample
backward_mask: allow propagation of generator if 1, else stop propagation
'''
model = Model(
    inputs=[
        real_input_layer,
        latent_input_layer,
        forward_mask,
        backward_mask,
        backward_mask_inverse,
    ],
    outputs=[generator_output_layer, discriminator_output_layer],
)
model.compile(
    optimizer="adam",
    loss=["binary_crossentropy", "binary_crossentropy"],
    loss_weights=[1, 1],
)
model.summary()
[/code]

where the dataset and the corresponding control masks are, for example:

[code]
# seed 42
np.random.seed(42)
[/code]

So, how do I train the static GAN? I know the dynamic approach, based on this documentation. I have also tested the control flow layers, and they worked as expected. So what is wrong with my setup?
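For reference, the dynamic approach mentioned above follows the usual Keras pattern of overriding train_step with two separate gradient passes. Below is a condensed sketch of that pattern (assuming the TensorFlow backend; the class name DynamicGAN is mine, and this is not my static setup):

[code]
import tensorflow as tf
import keras

class DynamicGAN(keras.Model):
    def __init__(self, discriminator, generator, latent_dim):
        super().__init__()
        self.discriminator = discriminator
        self.generator = generator
        self.latent_dim = latent_dim

    def compile(self, d_optimizer, g_optimizer, loss_fn):
        super().compile()
        self.d_optimizer = d_optimizer
        self.g_optimizer = g_optimizer
        self.loss_fn = loss_fn

    def train_step(self, real_images):
        batch_size = tf.shape(real_images)[0]
        noise = tf.random.normal((batch_size, self.latent_dim))
        # Discriminator step: only discriminator gradients are applied, so
        # the generator is effectively frozen here.
        with tf.GradientTape() as tape:
            fake_images = self.generator(noise)
            real_pred = self.discriminator(real_images)
            fake_pred = self.discriminator(fake_images)
            d_loss = (
                self.loss_fn(tf.ones_like(real_pred), real_pred)
                + self.loss_fn(tf.zeros_like(fake_pred), fake_pred)
            )
        grads = tape.gradient(d_loss, self.discriminator.trainable_weights)
        self.d_optimizer.apply_gradients(zip(grads, self.discriminator.trainable_weights))
        # Generator step: gradients flow through the discriminator into the
        # generator, but only the generator's weights are updated.
        with tf.GradientTape() as tape:
            fake_pred = self.discriminator(self.generator(noise))
            g_loss = self.loss_fn(tf.ones_like(fake_pred), fake_pred)
        grads = tape.gradient(g_loss, self.generator.trainable_weights)
        self.g_optimizer.apply_gradients(zip(grads, self.generator.trainable_weights))
        return {"d_loss": d_loss, "g_loss": g_loss}
[/code]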