Отрицательные потери не уменьшаются в нейронной сетиPython

Программы на Python
Ответить Пред. темаСлед. тема
Anonymous
 Отрицательные потери не уменьшаются в нейронной сети

Сообщение Anonymous »

У меня есть код нейронной сети, созданный на Python. Когда я проверяю убыток с его помощью, положительный убыток уменьшается, как и ожидалось, но отрицательный убыток увеличивается. Может ли кто-нибудь помочь мне определить, что я здесь делаю неправильно?
import numpy as np

inputs = 2
outputs = 1
hiddenUnits = 4

# Initialize z1 as a global variable
z1 = np.array([]) # This will be set in feedForward
z2 = np.array([]) # This will be set in feedForward
dw2 = np.array([]) # This will be set in feedForward
dw1 = np.array([]) # This will be set in feedForward
w1 = np.array([]) # This will be set in feedForward
w2 = np.array([]) # This will be set in feedForward

#-----------------------------------------------------------------------------
# SANITIZE INPUTS FOR PROPER STRUCTURE
#-----------------------------------------------------------------------------

def sanitizeInputGrouping(inp1, inp2):
outputArray = []
for index in range(len(inp1)):
outputArray.append(np.array([inp1[index], inp2[index]]))
return np.array(outputArray)

def sigmoidActivation(z):
return 1 / (1 + np.exp(-z))

def sigmoidDerivative(z):
return sigmoidActivation(z) * (1 - sigmoidActivation(z))

#-----------------------------------------------------------------------------
# FEED FORWARD
#-----------------------------------------------------------------------------

def feedForward(x):
global z1 # Make z1 global
print("---------------------------------------------------------------------------")
print("FEED FORWARD")
print("---------------------------------------------------------------------------")
a1 = np.dot(x, w1) # (9, 2) * (2, 4) = (9, 4)
z1 = sigmoidActivation(a1) # z1 will be a (9, 4) array
a2 = np.dot(z1, w2) # (9, 4) * (4, 1) = (9, 1)
z2 = sigmoidActivation(a2)
#print(f"X : {x} with Size : {x.shape}")
#print(f"w1 : {w1} with Size : {w1.shape}")
#print(f"a1 : {a1} with Size : {a1.shape}")
#print(f"z1 : {z1} with Size : {z1.shape}")
#print(f"a2 : {a2} with Size : {a2.shape}")
#print(f"z2 : {z2} with Size : {z2.shape}")
return z2

#-----------------------------------------------------------------------------
# BACK PROPOGATION
#-----------------------------------------------------------------------------
def backPropogation(receivedOpt, actualOpt):
global z1 # Ensure we're using the global
global dw2 # Ensure we're using the global
global dw1 # Ensure we're using the global

# Calculate delta2 , dz2 and dw2
delta2 = actualOpt.reshape(-1 , 1) - receivedOpt
dz2 = delta2 * sigmoidDerivative(receivedOpt).reshape(-1, 1)
dw2 = (1 / inputs) * np.dot(z1.T, dz2)

# Calculate delta1 , dz1 and dw1
delta1 = np.dot(dz2, w2.T)
dz1 = delta1 * sigmoidDerivative(z1)
dw1 = (1 / inputs) * np.dot(inp.T, dz1)

print("---------------------------------------------------------------------------")
print("BACK PROPAGATION")
print("---------------------------------------------------------------------------")
#print(f"actualOpt : {actualOpt.reshape(-1 , 1)} with Size : {actualOpt.reshape(-1 , 1).shape}")
#print(f"receivedOpt : {receivedOpt} with Size : {receivedOpt.shape}")
print(f"loss : {delta2} with Size : {delta2.shape}")
#print(f"dz2 : {dz2} with Size : {dz2.shape}")
#print(f"dw2 : {dw2} with Size : {dw2.shape}")
#print(f"delta1 : {delta1} with Size : {delta1.shape}")
#print(f"dw1 : {dw1} with Size : {dw1.shape}")

#-----------------------------------------------------------------------------
# UPDATE WEIGHTS
#-----------------------------------------------------------------------------
def updateWeights():
global w1
global w2
learningRate = 1.2
w1 = w1 - dw1
w2 = w2 - dw2

#-----------------------------------------------------------------------------
# START
#-----------------------------------------------------------------------------

# INITIALIZE WEIGHTS
inp1 = np.array([3, 2, 4, 3, 3.5, 2, 5.5, 1, 4.5])
inp2 = np.array([1.5, 1, 1.5, 1, 0.5, 0.5, 1, 1, 1])
opt = np.array([1, 0, 1, 0, 1, 0, 1, 0, 1])
w1 = np.random.randn(inputs, hiddenUnits) # (2, 4)
w2 = np.random.randn(hiddenUnits, outputs) # (4, 1)
inp = sanitizeInputGrouping(inp1, inp2) # (9, 2)
iterations = 10

print("---------------------------------------------------------------------------")
print("INPUTS")
print("---------------------------------------------------------------------------")
#print("inp1 values: ", inp1)
#print("inp2 values: ", inp2)
#print("W1 values: ", w1)
#print("W2 values: ", w2)
#print("INP values: ", inp)

for i in range(iterations):
op1 = feedForward(inp)
backPropogation(op1, opt)
updateWeights()

#print("W1 values: ", w1)
#print("W2 values: ", w2)

Ниже приведены выходные данные потерь:
---------------------------------------------------------------------------
BACK PROPAGATION
---------------------------------------------------------------------------
loss :
[[ 0.28916199]
[-0.70778902]
[ 0.29042354]
[-0.7129202 ]
[ 0.28722938]
[-0.7142503 ]
[ 0.29320588]
[-0.63740073]
[ 0.29106341]] with Size : (9, 1)
---------------------------------------------------------------------------
BACK PROPAGATION
---------------------------------------------------------------------------
loss :
[[ 0.27641759]
[-0.7212521 ]
[ 0.27906059]
[-0.7244842 ]
[ 0.27748146]
[-0.72626236]
[ 0.28467634]
[-0.65693775]
[ 0.28142508]] with Size : (9, 1)
---------------------------------------------------------------------------
BACK PROPAGATION
---------------------------------------------------------------------------
loss :
[[ 0.25948942]
[-0.73795286]
[ 0.26278487]
[-0.73990582]
[ 0.26378495]
[-0.74104634]
[ 0.27035703]
[-0.67968598]
[ 0.26674857]] with Size : (9, 1)
---------------------------------------------------------------------------
BACK PROPAGATION
---------------------------------------------------------------------------
loss :
[[ 0.23606744]
[-0.75962731]
[ 0.23873627]
[-0.76151805]
[ 0.24349145]
[-0.76028777]
[ 0.24652784]
[-0.70695612]
[ 0.24383071]] with Size : (9, 1)
---------------------------------------------------------------------------
BACK PROPAGATION
---------------------------------------------------------------------------
loss :
[[ 0.20316772]
[-0.78875041]
[ 0.20342506]
[-0.79253486]
[ 0.21283419]
[-0.7865251 ]
[ 0.2087842 ]
[-0.74069295]
[ 0.20860191]] with Size : (9, 1)
---------------------------------------------------------------------------
BACK PROPAGATION
---------------------------------------------------------------------------
loss :
[[ 0.15895445]
[-0.82777752]
[ 0.15559616]
[-0.83552496]
[ 0.16860758]
[-0.82267244]
[ 0.15651604]
[-0.78314906]
[ 0.15931006]] with Size : (9, 1)
---------------------------------------------------------------------------
BACK PROPAGATION
---------------------------------------------------------------------------
loss :
[[ 0.10779256]
[-0.87580435]
[ 0.10234363]
[-0.88719489]
[ 0.11442314]
[-0.86904615]
[ 0.10036324]
[-0.83485581]
[ 0.10381154]] with Size : (9, 1)
---------------------------------------------------------------------------
BACK PROPAGATION
---------------------------------------------------------------------------
loss :
[[ 0.0620474 ]
[-0.92402256]
[ 0.05735878]
[-0.93486044]
[ 0.06487198]
[-0.91805822]
[ 0.05475579]
[-0.89041349]
[ 0.05739927]] with Size : (9, 1)

---------------------------------------------------------------------------
BACK PROPAGATION
---------------------------------------------------------------------------
loss :
[[ 0.0310561 ]
[-0.96055429]
[ 0.02806986]
[-0.96749881]
[ 0.03181618]
[-0.95695369]
[ 0.02587254]
[-0.93837127]
[ 0.02771649]] with Size : (9, 1)
---------------------------------------------------------------------------
BACK PROPAGATION
---------------------------------------------------------------------------
loss :
[[ 0.01430976]
[-0.98182264]
[ 0.01296531]
[-0.98513183]
[ 0.01444208]
[-0.98020197]
[ 0.01204377]
[-0.97034898]
[ 0.01275771]] with Size : (9, 1)


Подробнее здесь: https://stackoverflow.com/questions/791 ... al-network
Реклама
Ответить Пред. темаСлед. тема

Быстрый ответ

Изменение регистра текста: 
Смайлики
:) :( :oops: :roll: :wink: :muza: :clever: :sorry: :angel: :read: *x)
Ещё смайлики…
   
К этому ответу прикреплено по крайней мере одно вложение.

Если вы не хотите добавлять вложения, оставьте поля пустыми.

Максимально разрешённый размер вложения: 15 МБ.

  • Похожие темы
    Ответы
    Просмотры
    Последнее сообщение

Вернуться в «Python»