Skip to main content
added 2 characters in body; edited tags
Source Link
desertnaut
  • 60.9k
  • 32
  • 158
  • 184

I implemented a custom training loop for a custom loss function that also incorporates the constraints of an unsupervised parametric optimization problem. The corresponding training loop creates then multiple epoch_loss outputs (the amount of the loss outputs is the same as the number of elements in ab_train). What should I do to obtain just one epoch_loss during each epoch/iteration?

train_loss_results = []

num_epochs = 200

def loss(model,ab):
    xyz_pred = model(ab)
    return penalty_function(xyz_pred,ab)

def grad(model,ab,xyz):
    with tf.GradientTape() as tape:
        loss_value = loss(model,ab)
    return loss_value, tape.gradient(loss_value,model.trainable_variables)

opt = tf.keras.optimizers.Adam(learning_rate=0.0001)

for epoch in range(num_epochs):        
    for ab in ab_train:        
        ab = ab.reshape(1,2)
        xyz_pred_us = model(ab) #forward pass
        loss_value,grads = grad(model,ab_train,xyz_pred_us)
        
        opt.apply_gradients(zip(grads, model.trainable_variables))
        epoch_loss = loss_value
        train_loss_results.append(epoch_loss)
    print("Epoch {}: Loss: {}".format(epoch,epoch_loss))
train_loss_results = []

num_epochs = 200

def loss(model,ab):
    xyz_pred = model(ab)
    return penalty_function(xyz_pred,ab)

def grad(model,ab,xyz):
    with tf.GradientTape() as tape:
        loss_value = loss(model,ab)
    return loss_value, tape.gradient(loss_value,model.trainable_variables)

opt = tf.keras.optimizers.Adam(learning_rate=0.0001)

for epoch in range(num_epochs):        
    for ab in ab_train:        
        ab = ab.reshape(1,2)
        xyz_pred_us = model(ab) #forward pass
        loss_value,grads = grad(model,ab_train,xyz_pred_us)
        
        opt.apply_gradients(zip(grads, model.trainable_variables))
        epoch_loss = loss_value
        train_loss_results.append(epoch_loss)
    print("Epoch {}: Loss: {}".format(epoch,epoch_loss))

I implemented a custom training loop for a custom loss function that also incorporates the constraints of an unsupervised parametric optimization problem. The corresponding training loop creates then multiple epoch_loss outputs (the amount of the loss outputs is the same as the number of elements in ab_train). What should I do to obtain just one epoch_loss during each epoch/iteration?

train_loss_results = []

num_epochs = 200

def loss(model,ab):
    xyz_pred = model(ab)
    return penalty_function(xyz_pred,ab)

def grad(model,ab,xyz):
    with tf.GradientTape() as tape:
        loss_value = loss(model,ab)
    return loss_value, tape.gradient(loss_value,model.trainable_variables)

opt = tf.keras.optimizers.Adam(learning_rate=0.0001)

for epoch in range(num_epochs):        
    for ab in ab_train:        
        ab = ab.reshape(1,2)
        xyz_pred_us = model(ab) #forward pass
        loss_value,grads = grad(model,ab_train,xyz_pred_us)
        
        opt.apply_gradients(zip(grads, model.trainable_variables))
        epoch_loss = loss_value
        train_loss_results.append(epoch_loss)
    print("Epoch {}: Loss: {}".format(epoch,epoch_loss))

I implemented a custom training loop for a custom loss function that also incorporates the constraints of an unsupervised parametric optimization problem. The corresponding training loop creates then multiple epoch_loss outputs (the amount of the loss outputs is the same as the number of elements in ab_train). What should I do to obtain just one epoch_loss during each epoch/iteration?

train_loss_results = []

num_epochs = 200

def loss(model,ab):
    xyz_pred = model(ab)
    return penalty_function(xyz_pred,ab)

def grad(model,ab,xyz):
    with tf.GradientTape() as tape:
        loss_value = loss(model,ab)
    return loss_value, tape.gradient(loss_value,model.trainable_variables)

opt = tf.keras.optimizers.Adam(learning_rate=0.0001)

for epoch in range(num_epochs):        
    for ab in ab_train:        
        ab = ab.reshape(1,2)
        xyz_pred_us = model(ab) #forward pass
        loss_value,grads = grad(model,ab_train,xyz_pred_us)
        
        opt.apply_gradients(zip(grads, model.trainable_variables))
        epoch_loss = loss_value
        train_loss_results.append(epoch_loss)
    print("Epoch {}: Loss: {}".format(epoch,epoch_loss))
deleted 60 characters in body
Source Link

I implemented a custom training loop for a custom loss function that also incorporates the constraints of an unsupervised parametric optimization problem. The corresponding training loop creates then multiple epoch_loss outputs (the amount of the loss outputs is the same as the number of elements in ab_train). What should I do to obtain just one epoch_loss during each epoch/iteration?

train_loss_results = []

num_epochs = 5200

def loss(model,ab):
    xyz_pred = model(ab)
    return penalty_function(xyz_pred,ab)

def grad(model,ab,xyz):
    with tf.GradientTape() as tape:
        loss_value = loss(model,ab)
    return loss_value, tape.gradient(loss_value,model.trainable_variables)

opt = tf.keras.optimizers.Adam(learning_rate=0.0001)

for epoch in range(num_epochs):        
     epoch_loss = 0
    for ab in ab_train:        
        ab = ab.reshape(1,2)
        xyz_pred_us = model(ab) #forward pass
        loss_value,grads = grad(model,ab_train,xyz_pred_us)
        
        opt.apply_gradients(zip(grads, model.trainable_variables))
        epoch_loss += loss_value
        
 epoch_loss /= len(ab_train)loss_value
        
 train_loss_results.append(epoch_loss) 
    
 print("Epoch {}: Loss: {}".format(epoch,epoch_loss))

I implemented a custom training loop for a custom loss function that also incorporates the constraints of an unsupervised parametric optimization problem. The corresponding training loop creates then multiple epoch_loss outputs (the amount of the loss outputs is the same as the number of elements in ab_train). What should I do to obtain just one epoch_loss during each epoch/iteration?

train_loss_results = []

num_epochs = 5

def loss(model,ab):
    xyz_pred = model(ab)
    return penalty_function(xyz_pred,ab)

def grad(model,ab,xyz):
    with tf.GradientTape() as tape:
        loss_value = loss(model,ab)
    return loss_value, tape.gradient(loss_value,model.trainable_variables)

opt = tf.keras.optimizers.Adam(learning_rate=0.0001)

for epoch in range(num_epochs):        
     epoch_loss = 0
    for ab in ab_train:        
        ab = ab.reshape(1,2)
        xyz_pred_us = model(ab) #forward pass
        loss_value,grads = grad(model,ab_train,xyz_pred_us)
        
        opt.apply_gradients(zip(grads, model.trainable_variables))
        epoch_loss += loss_value
        
 epoch_loss /= len(ab_train)
        
 train_loss_results.append(epoch_loss)    
 print("Epoch {}: Loss: {}".format(epoch,epoch_loss))

I implemented a custom training loop for a custom loss function that also incorporates the constraints of an unsupervised parametric optimization problem. The corresponding training loop creates then multiple epoch_loss outputs (the amount of the loss outputs is the same as the number of elements in ab_train). What should I do to obtain just one epoch_loss during each epoch/iteration?

train_loss_results = []

num_epochs = 200

def loss(model,ab):
    xyz_pred = model(ab)
    return penalty_function(xyz_pred,ab)

def grad(model,ab,xyz):
    with tf.GradientTape() as tape:
        loss_value = loss(model,ab)
    return loss_value, tape.gradient(loss_value,model.trainable_variables)

opt = tf.keras.optimizers.Adam(learning_rate=0.0001)

for epoch in range(num_epochs):        
    for ab in ab_train:        
        ab = ab.reshape(1,2)
        xyz_pred_us = model(ab) #forward pass
        loss_value,grads = grad(model,ab_train,xyz_pred_us)
        
        opt.apply_gradients(zip(grads, model.trainable_variables))
        epoch_loss = loss_value
        train_loss_results.append(epoch_loss) 
    print("Epoch {}: Loss: {}".format(epoch,epoch_loss))
Source Link

customized training loop parametric optimization

I implemented a custom training loop for a custom loss function that also incorporates the constraints of an unsupervised parametric optimization problem. The corresponding training loop creates then multiple epoch_loss outputs (the amount of the loss outputs is the same as the number of elements in ab_train). What should I do to obtain just one epoch_loss during each epoch/iteration?

train_loss_results = []

num_epochs = 5

def loss(model,ab):
    xyz_pred = model(ab)
    return penalty_function(xyz_pred,ab)

def grad(model,ab,xyz):
    with tf.GradientTape() as tape:
        loss_value = loss(model,ab)
    return loss_value, tape.gradient(loss_value,model.trainable_variables)

opt = tf.keras.optimizers.Adam(learning_rate=0.0001)

for epoch in range(num_epochs):        
    epoch_loss = 0
    for ab in ab_train:        
        ab = ab.reshape(1,2)
        xyz_pred_us = model(ab) #forward pass
        loss_value,grads = grad(model,ab_train,xyz_pred_us)
        
        opt.apply_gradients(zip(grads, model.trainable_variables))
        epoch_loss += loss_value
        
epoch_loss /= len(ab_train)
        
train_loss_results.append(epoch_loss)    
print("Epoch {}: Loss: {}".format(epoch,epoch_loss))