Obtain F1 score, recall, confusion matrix and precision

65 Views Asked by At

How can I obtain the F1 score, recall, confusion matrix and precision in this code? I have used compile and obtained accuracy, but I don't know how to write the code to obtain these metrics from my model. I would be thankful for any help.

for comm_round in range(comms_round):

# One communication round of federated averaging: broadcast the global
# weights, train each client locally, then aggregate the scaled client
# weights back into the global model.
global_weights = global_model.get_weights()

scaled_local_weight_list = list()

client_names = list(clients_batched.keys())
random.shuffle(client_names)

for client in client_names:
    # NOTE(review): this rebinds the same object every iteration; if
    # Transformer is a class (not a built model instance) this should
    # probably be Transformer() — confirm.
    local_model = Transformer
    local_model.compile(loss=tf.keras.losses.CategoricalCrossentropy(),
                        optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
                        metrics=['acc'])

    # NOTE(review): redundant — global_model already holds global_weights
    # at this point; only the local model needs them.
    global_model.set_weights(global_weights)

    # Start this client's training from the current global weights.
    local_model.set_weights(global_weights)

    history = local_model.fit(clients_batched[client], epochs=1, verbose=0,
                              callbacks=[checkpoint_callback])

    # Weight the client's contribution by its share of the training data.
    scaling_factor = weight_scalling_factor(clients_batched, client)
    scaled_weights = scale_model_weights(local_model.get_weights(), scaling_factor)
    scaled_local_weight_list.append(scaled_weights)

    K.clear_session()

# Federated averaging: element-wise sum of the pre-scaled client weights.
average_weights = sum_scaled_weights(scaled_local_weight_list)

global_model.set_weights(average_weights)

# Evaluate the updated global model on every test batch.
# Bug fix: use the unpacked batch (X_test, Y_test) — the original called
# test_model with the undefined names test_x / test_y.
for (X_test, Y_test) in test_batched:
    global_acc, global_loss = test_model(X_test, Y_test, global_model, comm_round + 1)

I also want to graph the performance of the model on the train and test sets recorded during training, using line plots — one for the loss and one for the classification accuracy.

2

There are 2 best solutions below

5
Muhammed Yunus On

Keras has precision, AUC, and other metrics listed at: https://keras.io/api/metrics/classification_metrics/

Try using them as follows:

# Request the extra classification metrics alongside accuracy.
# thresholds=0 / from_logits=True assume the model outputs logits
# rather than probabilities.
extra_metrics = [
    tf.keras.metrics.Precision(thresholds=0),
    tf.keras.metrics.Recall(thresholds=0),
    tf.keras.metrics.AUC(from_logits=True),
    # could add more metrics...
]
local_model.compile(
    loss=tf.keras.losses.CategoricalCrossentropy(),
    optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
    metrics=['acc'] + extra_metrics,
)

Use thresholds=0 and from_logits=True if your model returns logits, as described on the page above.

To plot the metrics, it's something like:

# Show everything that was recorded in the training history.
history_keys = list(history.history.keys())
print('Info in history is:\n\t', history_keys)

# Pick the first three recorded metrics to plot against epoch number.
selected = [history_keys[0], history_keys[1], history_keys[2]]

# One line per selected metric.
for key in selected:
    plt.plot(history.history[key])

plt.title('History')
plt.ylabel('value')
plt.xlabel('epoch')
plt.legend(selected, loc='upper left')
plt.show()
0
Muhammed Yunus On

This code adds metrics and plots for each loop. Note that, for each metric, there will be one measurement per epoch. So if you have 1 epoch, and you plot the accuracy, it'll just be 1 point.

# Bug fix: `tf` is only an in-code alias, not an importable package, so
# `from tf.keras import metrics` raises ModuleNotFoundError. Import from
# the real package name instead.
from tensorflow.keras import metrics

# One communication round: broadcast global weights, train each client
# with extra classification metrics, plot its history, then aggregate.
global_weights = global_model.get_weights()

scaled_local_weight_list = list()

client_names = list(clients_batched.keys())
random.shuffle(client_names)

for client in client_names:
    # NOTE(review): this rebinds the same object every iteration; if
    # Transformer is a class this should probably be Transformer() — confirm.
    local_model = Transformer
    local_model.compile(
        loss=tf.keras.losses.CategoricalCrossentropy(),
        optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
        metrics=['acc',
                 metrics.Precision(),
                 metrics.Recall(),
                 metrics.AUC()]
    )

    # NOTE(review): redundant — global_model already holds global_weights.
    global_model.set_weights(global_weights)

    # Start this client's training from the current global weights.
    local_model.set_weights(global_weights)

    history = local_model.fit(clients_batched[client], epochs=1, verbose=0,
                              callbacks=[checkpoint_callback])

    # Weight the client's contribution by its share of the training data.
    scaling_factor = weight_scalling_factor(clients_batched, client)
    scaled_weights = scale_model_weights(local_model.get_weights(), scaling_factor)
    scaled_local_weight_list.append(scaled_weights)

    # Show everything recorded in this client's training history.
    keys = list(history.history.keys())
    print(f'Information available in history for {client} is:\n\t', keys)

    # Plot the first three recorded metrics (one point per epoch; with
    # epochs=1 each line is a single point).
    metricA = keys[0]
    metricB = keys[1]
    metricC = keys[2]

    plt.plot(history.history[metricA])
    plt.plot(history.history[metricB])
    plt.plot(history.history[metricC])

    plt.title('History')
    plt.ylabel('value')
    plt.xlabel('epoch')
    plt.legend([metricA, metricB, metricC], loc='upper left')
    plt.show()

    K.clear_session()

# Federated averaging: element-wise sum of the pre-scaled client weights.
average_weights = sum_scaled_weights(scaled_local_weight_list)

global_model.set_weights(average_weights)

# Evaluate the updated global model on every test batch.
# Bug fix: use the unpacked batch (X_test, Y_test) — the original called
# test_model with the undefined names test_x / test_y.
for (X_test, Y_test) in test_batched:
    global_acc, global_loss = test_model(X_test, Y_test, global_model, comm_round + 1)