I'm trying to add custom metrics (precision, recall and f1) to my run using the TKipf GCN model https://github.com/tkipf/gcn. I built up masked functions for those metrics, and when I tried integrating them into the tf.session.run call in the evaluate method, I got this error: TypeError: Fetch argument 0 has invalid type <class 'int'>, must be a string or Tensor. (Can not convert a int into a Tensor or Operation.
I checked other posts with similar titles, but I'm not using duplicate variable names. Here is the code where the error is being thrown:
Evaluation function:
# Define model evaluation function
def evaluate(features, support, labels, mask, placeholders, sess, model):
t_test = time.time()
feed_dict_val = construct_feed_dict(features, support, labels, mask, placeholders)
outs_val = sess.run([model.loss, model.accuracy, model.precision, model.recall, model.f1], feed_dict=feed_dict_val)
return outs_val[0], outs_val[1], (time.time() - t_test), outs_val[2], outs_val[3], outs_val[4]
The error was not thrown when it was just model.loss and model.accuracy, but it was when I added model.precision, model.recall and model.f1
Here are the 5 relevant functions for reference:
Loss and Accuracy (originally there):
def masked_softmax_cross_entropy(preds, labels, mask):
"""Softmax cross-entropy loss with masking."""
loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.reduce_mean(mask)
loss *= mask
return tf.reduce_mean(loss)
def masked_accuracy(preds, labels, mask):
"""Accuracy with masking."""
correct_prediction = tf.equal(tf.argmax(preds, 1), tf.argmax(labels, 1))
accuracy_all = tf.cast(correct_prediction, tf.float32)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.reduce_mean(mask)
accuracy_all *= mask
return tf.reduce_mean(accuracy_all)
Precision, recall and f1(my functions):
def masked_precision(preds, labels, mask):
preds_ints = tf.argmax(preds, 1)
labels_ints = tf.argmax(labels, 1)
mask = tf.cast(mask, dtype=tf.float32)
trueposlayer = keras.metrics.TruePositives()
trueposlayer.update_state(labels_ints, preds_ints, sample_weight=mask)
truepos = trueposlayer.result().numpy()
falseposlayer = keras.metrics.FalsePositives()
falseposlayer.update_state(labels_ints,preds_ints, sample_weight=mask)
falsepos = falseposlayer.result().numpy()
return tf.convert_to_tensor(calc_precision(truepos, falsepos))
def masked_recall(preds, labels, mask):
preds_ints = tf.argmax(preds, 1)
labels_ints = tf.argmax(labels, 1)
mask = tf.cast(mask, dtype=tf.float32)
trueposlayer = keras.metrics.TruePositives()
trueposlayer.update_state(labels_ints, preds_ints, sample_weight=mask)
truepos = trueposlayer.result().numpy()
falseneglayer = keras.metrics.FalseNegatives()
falseneglayer.update_state(labels_ints,preds_ints, sample_weight=mask)
falseneg = falseneglayer.result().numpy()
return tf.convert_to_tensor(calc_recall(truepos, falseneg))
def masked_f1_score(preds, labels, mask):
preds_ints = tf.argmax(preds, 1)
labels_ints = tf.argmax(labels, 1)
mask = tf.cast(mask, dtype=tf.float32)
trueposlayer = keras.metrics.TruePositives()
trueposlayer.update_state(labels_ints, preds_ints, sample_weight=mask)
truepos = trueposlayer.result().numpy()
falseposlayer = keras.metrics.FalsePositives()
falseposlayer.update_state(labels_ints,preds_ints, sample_weight=mask)
falsepos = falseposlayer.result().numpy()
falseneglayer = keras.metrics.FalseNegatives()
falseneglayer.update_state(labels_ints,preds_ints, sample_weight=mask)
falseneg = falseneglayer.result().numpy()
recall = calc_recall(truepos, falseneg)
precision = calc_precision(truepos, falsepos)
return tf.convert_to_tensor(2*((precision*recall)/(precision+recall)))