Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion tensorboard/plugins/pr_curve/pr_curve_demo.py
Original file line number Diff line number Diff line change
Expand Up @@ -267,7 +267,7 @@ def run_all(logdir, steps, thresholds, verbose=False):

def main(unused_argv):
print("Saving output to %s." % FLAGS.logdir)
run_all(FLAGS.logdir, FLAGS.steps, 50, verbose=True)
run_all(FLAGS.logdir, FLAGS.steps, 51, verbose=True)
print("Done. Output saved to %s." % FLAGS.logdir)


Expand Down
34 changes: 16 additions & 18 deletions tensorboard/plugins/pr_curve/pr_curves_plugin.py
Original file line number Diff line number Diff line change
Expand Up @@ -205,23 +205,27 @@ def _make_pr_entry(self, step, wall_time, data_array):
Returns:
A PR curve entry.
"""
# Trim entries for which TP + FP = 0 (precision is undefined) at the tail of
# the data.
true_positives = [
int(v) for v in data_array[metadata.TRUE_POSITIVES_INDEX]
]
false_positives = [
int(v) for v in data_array[metadata.FALSE_POSITIVES_INDEX]
]
tp_index = metadata.TRUE_POSITIVES_INDEX
fp_index = metadata.FALSE_POSITIVES_INDEX
tn_index = metadata.TRUE_NEGATIVES_INDEX
fn_index = metadata.FALSE_NEGATIVES_INDEX

# Trim entries for which TP + FP = 0 (precision is undefined) at the tail of
# the data.
positives = data_array[[tp_index, fp_index], :].astype(int).sum(axis=0)
# Searching from the end, find the farthest index where TP + FP = 0.
end_index_inclusive = len(positives) - 1
while end_index_inclusive > 0 and positives[end_index_inclusive] == 0:
end_index_inclusive -= 1
end_index = end_index_inclusive + 1
# Generate thresholds in [0, 1].
num_thresholds = data_array.shape[1]
thresholds = (np.arange(1, end_index + 1) / num_thresholds).tolist()
thresholds = np.linspace(0.0, 1.0, num_thresholds)

true_positives = [int(v) for v in data_array[tp_index]]
false_positives = [int(v) for v in data_array[fp_index]]
true_negatives = [int(v) for v in data_array[tn_index]]
false_negatives = [int(v) for v in data_array[fn_index]]

return {
"wall_time": wall_time,
Expand All @@ -232,13 +236,7 @@ def _make_pr_entry(self, step, wall_time, data_array):
"recall": data_array[metadata.RECALL_INDEX, :end_index].tolist(),
"true_positives": true_positives[:end_index],
"false_positives": false_positives[:end_index],
"true_negatives": [
int(v)
for v in data_array[metadata.TRUE_NEGATIVES_INDEX][:end_index]
],
"false_negatives": [
int(v)
for v in data_array[metadata.FALSE_NEGATIVES_INDEX][:end_index]
],
"thresholds": thresholds,
"true_negatives": true_negatives[:end_index],
"false_negatives": false_negatives[:end_index],
"thresholds": thresholds[:end_index].tolist(),
}
12 changes: 6 additions & 6 deletions tensorboard/plugins/pr_curve/pr_curves_plugin_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -214,7 +214,7 @@ def testPrCurvesDataCorrect(self):
expected_false_positives=[300, 201, 38, 2],
expected_true_negatives=[0, 99, 262, 298],
expected_false_negatives=[0, 24, 105, 144],
expected_thresholds=[0.2, 0.4, 0.6, 0.8],
expected_thresholds=[0.0, 0.25, 0.5, 0.75],
pr_curve_entry=entries[0],
)
self.validatePrCurveEntry(
Expand All @@ -225,7 +225,7 @@ def testPrCurvesDataCorrect(self):
expected_false_positives=[300, 204, 39, 6],
expected_true_negatives=[0, 96, 261, 294],
expected_false_negatives=[0, 22, 105, 146],
expected_thresholds=[0.2, 0.4, 0.6, 0.8],
expected_thresholds=[0.0, 0.25, 0.5, 0.75],
pr_curve_entry=entries[1],
)
self.validatePrCurveEntry(
Expand All @@ -236,7 +236,7 @@ def testPrCurvesDataCorrect(self):
expected_false_positives=[300, 185, 38, 2],
expected_true_negatives=[0, 115, 262, 298],
expected_false_negatives=[0, 30, 111, 146],
expected_thresholds=[0.2, 0.4, 0.6, 0.8],
expected_thresholds=[0.0, 0.25, 0.5, 0.75],
pr_curve_entry=entries[2],
)

Expand All @@ -252,7 +252,7 @@ def testPrCurvesDataCorrect(self):
expected_false_positives=[150, 105, 18, 0],
expected_true_negatives=[0, 45, 132, 150],
expected_false_negatives=[0, 11, 54, 70],
expected_thresholds=[0.2, 0.4, 0.6, 0.8],
expected_thresholds=[0.0, 0.25, 0.5, 0.75],
pr_curve_entry=entries[0],
)
self.validatePrCurveEntry(
Expand All @@ -263,7 +263,7 @@ def testPrCurvesDataCorrect(self):
expected_false_positives=[150, 99, 21, 3],
expected_true_negatives=[0, 51, 129, 147],
expected_false_negatives=[0, 13, 54, 74],
expected_thresholds=[0.2, 0.4, 0.6, 0.8],
expected_thresholds=[0.0, 0.25, 0.5, 0.75],
pr_curve_entry=entries[1],
)
self.validatePrCurveEntry(
Expand All @@ -274,7 +274,7 @@ def testPrCurvesDataCorrect(self):
expected_false_positives=[150, 92, 20, 1],
expected_true_negatives=[0, 58, 130, 149],
expected_false_negatives=[0, 14, 59, 73],
expected_thresholds=[0.2, 0.4, 0.6, 0.8],
expected_thresholds=[0.0, 0.25, 0.5, 0.75],
pr_curve_entry=entries[2],
)

Expand Down
63 changes: 63 additions & 0 deletions tensorboard/plugins/pr_curve/summary_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -285,6 +285,69 @@ def test_counts_below_1(self):
values = tensor_util.make_ndarray(pb.value[0].tensor)
self.verify_float_arrays_are_equal(expected, values)

def test_predictions_at_thresholds(self):
    """Checks TP/FP/TN/FN, precision, and recall when every prediction
    lands exactly ON a threshold value.

    All 5 labels are True, and the 5 predictions are exactly the 5
    evenly spaced thresholds, so at each threshold t the predictions
    >= t count as positives: TP decreases by one per threshold and FN
    increases correspondingly; FP/TN stay 0 since there are no
    negative labels.

    NOTE(review): relies on the enclosing test class's helpers
    compute_and_check_summary_pb / verify_float_arrays_are_equal,
    defined elsewhere in this file.
    """
    num_thresholds = 5
    # For 5 thresholds, we expect the thresholds used to be:
    # [0.0, 0.25, 0.5, 0.75, 1.0]
    pb = self.compute_and_check_summary_pb(
        name="foo",
        labels=np.array([True] * num_thresholds),
        predictions=np.float32([0.0, 0.25, 0.5, 0.75, 1.0]),
        num_thresholds=num_thresholds,
    )
    # Rows are indexed by the summary's data layout; one column per
    # threshold [0.0, 0.25, 0.5, 0.75, 1.0].
    expected = [
        [5.0, 4.0, 3.0, 2.0, 1.0],  # TP
        [0.0, 0.0, 0.0, 0.0, 0.0],  # FP
        [0.0, 0.0, 0.0, 0.0, 0.0],  # TN
        [0.0, 1.0, 2.0, 3.0, 4.0],  # FN
        [1.0, 1.0, 1.0, 1.0, 1.0],  # Precision
        [1.0, 0.8, 0.6, 0.4, 0.2],  # Recall
    ]
    values = tensor_util.make_ndarray(pb.value[0].tensor)
    self.verify_float_arrays_are_equal(expected, values)

def test_predictions_above_thresholds(self):
    """Checks counts when each prediction sits slightly ABOVE a threshold.

    Predictions are nudged just past the first four thresholds
    (0.01 > 0.0, 0.26 > 0.25, ...), so the expected counts are
    identical to the exactly-on-threshold case: a prediction just above
    threshold t still counts as positive at t but not at the next
    threshold. All labels are True, so FP/TN remain 0.

    NOTE(review): relies on the enclosing test class's helpers
    compute_and_check_summary_pb / verify_float_arrays_are_equal,
    defined elsewhere in this file.
    """
    num_thresholds = 5
    # For 5 thresholds, we expect the thresholds used to be:
    # [0.0, 0.25, 0.5, 0.75, 1.0]
    pb = self.compute_and_check_summary_pb(
        name="foo",
        labels=np.array([True] * num_thresholds),
        predictions=np.float32([0.01, 0.26, 0.51, 0.76, 1.0]),
        num_thresholds=num_thresholds,
    )
    # Same expectations as the on-threshold test: being epsilon above a
    # threshold does not change which thresholds a prediction clears.
    expected = [
        [5.0, 4.0, 3.0, 2.0, 1.0],  # TP
        [0.0, 0.0, 0.0, 0.0, 0.0],  # FP
        [0.0, 0.0, 0.0, 0.0, 0.0],  # TN
        [0.0, 1.0, 2.0, 3.0, 4.0],  # FN
        [1.0, 1.0, 1.0, 1.0, 1.0],  # Precision
        [1.0, 0.8, 0.6, 0.4, 0.2],  # Recall
    ]
    values = tensor_util.make_ndarray(pb.value[0].tensor)
    self.verify_float_arrays_are_equal(expected, values)

def test_predictions_below_thresholds(self):
    """Checks counts when each prediction sits slightly BELOW a threshold.

    Predictions fall just short of thresholds 0.25..1.0 (0.24 < 0.25,
    0.49 < 0.5, ...), so compared with the on-threshold case each
    prediction fails one extra threshold: TP drops off faster, and at
    the final threshold (1.0) nothing qualifies, giving TP = 0 and
    precision/recall of 0. All labels are True, so FP/TN remain 0.

    NOTE(review): relies on the enclosing test class's helpers
    compute_and_check_summary_pb / verify_float_arrays_are_equal,
    defined elsewhere in this file.
    """
    num_thresholds = 5
    # For 5 thresholds, we expect the thresholds used to be:
    # [0.0, 0.25, 0.5, 0.75, 1.0]
    pb = self.compute_and_check_summary_pb(
        name="foo",
        labels=np.array([True] * num_thresholds),
        predictions=np.float32([0.0, 0.24, 0.49, 0.74, 0.99]),
        num_thresholds=num_thresholds,
    )
    # Final column is all-zero counts: no prediction reaches 1.0, and
    # precision with TP + FP = 0 is reported here as 0.
    expected = [
        [5.0, 3.0, 2.0, 1.0, 0.0],  # TP
        [0.0, 0.0, 0.0, 0.0, 0.0],  # FP
        [0.0, 0.0, 0.0, 0.0, 0.0],  # TN
        [0.0, 2.0, 3.0, 4.0, 5.0],  # FN
        [1.0, 1.0, 1.0, 1.0, 0.0],  # Precision
        [1.0, 0.6, 0.4, 0.2, 0.0],  # Recall
    ]
    values = tensor_util.make_ndarray(pb.value[0].tensor)
    self.verify_float_arrays_are_equal(expected, values)

def test_raw_data(self):
# We pass these raw counts and precision/recall values.
name = "foo"
Expand Down