I am trying to code a semi-supervised model for a project outside of university. I first built a model that trains with model.fit (I tested it with supervised learning first). But for semi-supervised learning I need more flexibility, so I tried a custom training loop.

The problem is that the model doesn't seem to accept the weights for the data samples.

Note: At the moment the code doesn't train with unlabeled data; it should just mimic model.fit.

At the moment I am using this code:

import numpy as np
import tensorflow as tf
from sklearn.utils.class_weight import compute_class_weight
from sklearn.preprocessing import MultiLabelBinarizer

model = nn.model_lite(ds, shapes=nn.get_shapes(x_train))
# The model is implemented in another script.

# Class weighting:
def generate_class_weight(class_series, multi_class=True, one_hot_encoded=False):
    if multi_class:
        if one_hot_encoded:
            class_series = np.argmax(class_series, axis=1)

        class_labels = np.unique(class_series)
        class_weights = compute_class_weight(class_weight='balanced', classes=class_labels, y=class_series)

        return dict(zip(class_labels, class_weights))
    else:
        mlb = None
        if not one_hot_encoded:
            mlb = MultiLabelBinarizer()
            class_series = mlb.fit_transform(class_series)

        n_samples = len(class_series)
        n_classes = len(class_series[0])

        class_count = [0]*n_classes

        for classes in class_series:
            for index in range(n_classes):
                if classes[index] != 0:
                    class_count[index] += 1

        class_weights = [n_samples/(n_classes * freq) if freq > 0 else 1 for freq in class_count]
        class_labels = range(len(class_weights)) if mlb is None else mlb.classes_
        return dict(zip(class_labels, class_weights))


class_weights = generate_class_weight(y_train)
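# For my binary labels this returns a dict of the form {0: w0, 1: w1}, which the
# loss function below indexes with class_weights[0] and class_weights[1].
# Quick sanity check with a toy label vector (not my real data):
y_toy = np.array([0, 0, 0, 1])
print(generate_class_weight(y_toy))   # -> something like {0: 0.666..., 1: 2.0}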

Batches = []
unlab_batches = []
y_batch = []
# Test for the loss function:
for i in range(batch_len2):
    b = batched_dict(x_train, i, batch_size)
    Batches.append(b)
    d = y_train[i * batch_size:(i+1)*batch_size]
    y_batch.append(d)
# The remaining labeled samples form the last (smaller) batch:
batch = {}
for key in x_train:
    batch[key] = x_train[key][(i+1)*batch_size:]
d = y_train[(i+1)*batch_size:]
y_batch.append(d)

Batches.append(batch)
print(Batches)
print(Batches[0])

for i in range(unlab_len):
    c = batched_dict(x_unlabel, i, batch_size)
    unlab_batches.append(c)

# The remaining unlabeled samples form the last (smaller) batch:
batch = {}
for key in x_unlabel:
    batch[key] = x_unlabel[key][(i+1)*batch_size:len(train_unlabels)]
unlab_batches.append(batch)

print(len(unlab_batches))
print(unlab_batches[0])
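# batched_dict is a small helper not shown here; as far as the loops above are
# concerned it just slices each feature array of a dict to the i-th batch.
# A rough sketch of it (my assumption, not the exact code):
def batched_dict(data, i, batch_size):
    return {key: data[key][i * batch_size:(i + 1) * batch_size] for key in data}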


def new_loss(model, y, x_train, x_unlabel, training):
    # Per-sample weights built from the two class weights (binary labels):
    sample_weights = (np.ones(y.shape) - y) * class_weights[0] + y * class_weights[1]

    y_pred = model(x_train, training=training)

    Loss_BCE = tf.keras.losses.binary_crossentropy
    Loss_unlab = self_consistency_loss
    #Loss_ges = Loss_BCE(y, y_pred) + Loss_unlab(y_pred, y, y_unlab)
    Loss_ges = Loss_BCE(y, y_pred)

    Loss_ges = Loss_ges * sample_weights
    #print(Loss_ges.shape)

    return Loss_ges
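# To see how the element-wise weighting broadcasts against the per-sample BCE
# vector, I checked the shapes on toy tensors (binary_crossentropy reduces the
# last axis, so it returns one value per sample):
y_toy2 = tf.constant([[0.], [1.], [1.]])
p_toy2 = tf.constant([[0.1], [0.8], [0.6]])
per_sample = tf.keras.losses.binary_crossentropy(y_toy2, p_toy2)
print(per_sample.shape)                      # (3,)
w_toy2 = tf.constant([[2.0], [0.5], [0.5]])  # same shape as y_toy2
print((per_sample * w_toy2).shape)           # (3, 3) because of broadcasting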


def lr_schedule(epoch):
    lr = 1e-3
    #lr = 1e-5
    if epoch > 20:
        lr *= 0.01
    elif epoch > 10:
        lr *= 0.1
    return lr


def grad(model, y, x_lab, x_unlab):
    with tf.GradientTape() as tape:
        tape.watch(model.trainable_variables)
        loss_value = new_loss(model, y, x_lab, x_unlab, training=True)
    grads = tape.gradient(loss_value, model.trainable_variables)
    return loss_value, grads


optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule(0))
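# Note: I only pass lr_schedule(0) here, so the learning rate never actually
# decays during training. Checking what the schedule would return:
for e in [0, 11, 21]:
    print(e, lr_schedule(e))   # roughly: 0.001, 0.0001, 1e-05
# I assume I would need something like
# optimizer.learning_rate.assign(lr_schedule(epoch)) at the start of each epoch
# to actually apply it.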


result_loss = []
result_acc = []

num_epochs = 60

for epoch in range(num_epochs):
    epoch_loss_average = tf.keras.metrics.Mean()
    epoch_accuracy = tf.keras.metrics.BinaryAccuracy()

    for j in range(3):
        x = Batches[j]
        y = y_batch[j]
        z = unlab_batches[j]
        loss_value, grads = grad(model, y, x, z)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))

        # Track
        epoch_loss_average.update_state(loss_value)
        epoch_accuracy.update_state(y, model(x, training=True))
        

    # End of epoch: record the metrics
    result_loss.append(epoch_loss_average.result())
    result_acc.append(epoch_accuracy.result())
    
    if epoch % 5 == 0:
        print("Epoch {:03d}: Loss: {:.3f}, Accuracy: {:.3%}".format(epoch,
                                                                epoch_loss_average.result(),
                                                                epoch_accuracy.result()))


filepath = path
model.save(filepath)

I already tried to simplify the loss function. But apart from that, I have no idea why it isn't working. For the training loop I literally just copied this tutorial: https://www.tensorflow.org/tutorials/customization/custom_training_walkthrough#train_the_model
