|
#!/usr/bin/env python
# Created by "Thieu" at 14:44, 05/02/2023 ----------%
#       Email: nguyenthieu2102@gmail.com            %
#       Github: https://github.com/thieu1995        %
# --------------------------------------------------%

import numpy as np
from permetrics.classification import ClassificationMetric
| 9 | + |
## Sample data: integer class labels for a 3-class problem.
## Plain Python lists work; the alternatives below show the other
## input formats the metric accepts.
y_true = [0, 1, 0, 0, 1, 0, 0, 0, 1, 2, 2, 2, 0]
y_pred = [0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1]

# Numpy arrays of integer labels:
# y_true = np.array([0, 1, 0, 0, 1, 0, 0, 0, 1, 2, 2, 2, 0])
# y_pred = np.array([0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1])

# One-hot targets with per-class probability predictions:
# y_true = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
# y_pred = np.array([[0.1, 0.8, 0.1], [0.8, 0.1, 0.1], [0.1, 0.1, 0.8]])

# String (categorical) labels:
# y_true = ["cat", "ant", "cat", "cat", "ant", "bird", "bird", "bird"]
# y_pred = ["ant", "ant", "cat", "cat", "ant", "cat", "bird", "ant"]

# Nested lists of string labels:
# y_true = [["cat", "ant"], ["cat", "cat"], ["ant", "bird"], ["bird", "bird"]]
# y_pred = [["ant", "ant"], ["cat", "cat"], ["ant", "cat"], ["bird", "ant"]]

# Jaccard similarity — jsi/jsc are short aliases of the long names:
# cm = ClassificationMetric(y_true, y_pred, decimal=5)
# print(cm.jaccard_similarity_index(average=None))
# print(cm.jaccard_similarity_coefficient(average="micro"))
# print(cm.jsi(average="macro"))
# print(cm.jsc(average="weighted"))

# Gini index and Cohen's kappa score (cks/CKS aliases):
# cm = ClassificationMetric(y_true, y_pred, decimal=5)
# print(cm.gini_index())
# print(cm.cks(average="micro"))
# print(cm.CKS(average="macro"))
# print(cm.CKS(average="weighted"))

# Matthews correlation coefficient under each averaging mode:
# print(cm.mcc(average=None))
# print(cm.mcc(average="micro"))
# print(cm.mcc(average="macro"))
# print(cm.mcc(average="weighted"))

# ROC-AUC needs per-class scores instead of hard labels.
# Example true labels and predicted scores for a 3-class problem:
# y_true = np.array([0, 1, 2, 1, 2, 0, 0, 1])
# y_score = np.array([[0.8, 0.1, 0.1],
#                     [0.2, 0.5, 0.3],
#                     [0.1, 0.3, 0.6],
#                     [0.3, 0.7, 0.0],
#                     [0.4, 0.3, 0.3],
#                     [0.6, 0.2, 0.2],
#                     [0.9, 0.1, 0.0],
#                     [0.1, 0.8, 0.1]])
#
# # y_true = [0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0]
# # y_score = [0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1]
#
# cm = ClassificationMetric(y_true, y_pred, decimal=5)
# print(cm.roc_auc_score(y_true, y_score, average="weighted"))


# Gini index computed through each of its aliases (gini_index / GINI /
# gini), one averaging mode per call; values are rounded to 5 decimals.
metric = ClassificationMetric(y_true, y_pred, decimal=5)
for alias, mode in ((metric.gini_index, None),
                    (metric.GINI, "macro"),
                    (metric.gini, "weighted")):
    print(alias(average=mode))
| 65 | + |
0 commit comments