Merge pull request opendatahub-io#279 from atheo89/fine-tuning
Fine-tunings on main in favor of the new release
openshift-ci[bot] authored Oct 23, 2023
2 parents 0e66323 + 8159569 commit 33399dd
Showing 3 changed files with 22 additions and 27 deletions.
8 changes: 1 addition & 7 deletions Makefile
@@ -1,6 +1,6 @@
  CONTAINER_ENGINE ?= podman
  IMAGE_REGISTRY ?= quay.io/opendatahub/workbench-images
- RELEASE ?= 2023a
+ RELEASE ?= 2023b
  DATE ?= $(shell date +'%Y%m%d')
  IMAGE_TAG ?= $(RELEASE)_$(DATE)
  KUBECTL_BIN ?= bin/kubectl
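For context on what the bump touches: IMAGE_TAG is built by concatenating the release label and the build date. A minimal Python sketch of that composition (illustrative only; the Makefile itself does this with the shell date call above):

    from datetime import date

    # Mirrors IMAGE_TAG ?= $(RELEASE)_$(DATE) after the bump to 2023b.
    release = "2023b"
    image_tag = f"{release}_{date.today():%Y%m%d}"
    print(image_tag)  # e.g. "2023b_20231023" for a build on Oct 23, 2023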
@@ -350,12 +350,6 @@ test-%: bin/kubectl
  $(call test_with_papermill,minimal,ubi8,python-3.8) \
  elif echo "$(FULL_NOTEBOOK_NAME)" | grep -q "datascience-ubi8"; then \
  $(MAKE) validate-ubi8-datascience -e FULL_NOTEBOOK_NAME=$(FULL_NOTEBOOK_NAME); \
- elif echo "$(FULL_NOTEBOOK_NAME)" | grep -q "pytorch-ubi8"; then \
- $(MAKE) validate-ubi8-datascience -e FULL_NOTEBOOK_NAME=$(FULL_NOTEBOOK_NAME); \
- $(call test_with_papermill,pytorch,ubi8,python-3.8) \
- elif echo "$(FULL_NOTEBOOK_NAME)" | grep -q "tensorflow-ubi8"; then \
- $(MAKE) validate-ubi8-datascience -e FULL_NOTEBOOK_NAME=$(FULL_NOTEBOOK_NAME); \
- $(call test_with_papermill,tensorflow,ubi8,python-3.8) \
  elif echo "$(FULL_NOTEBOOK_NAME)" | grep -q "trustyai-ubi8"; then \
  $(MAKE) validate-ubi8-datascience -e FULL_NOTEBOOK_NAME=$(FULL_NOTEBOOK_NAME); \
  $(call test_with_papermill,trustyai,ubi8,python-3.8) \
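The hunk above drops the pytorch-ubi8 and tensorflow-ubi8 branches from the grep-based dispatch in the test-% target, leaving the minimal, datascience, and trustyai paths. A hypothetical Python restatement of the remaining ubi8 dispatch, not code from the repository (the minimal branch's condition is truncated in the diff and assumed here):

    # Hypothetical restatement of the ubi8 branch selection in test-%.
    def dispatch_ubi8(full_notebook_name: str) -> list[str]:
        if "minimal-ubi8" in full_notebook_name:  # condition assumed, truncated above
            return ["papermill test: minimal, ubi8, python-3.8"]
        if "datascience-ubi8" in full_notebook_name:
            return ["make validate-ubi8-datascience"]
        if "trustyai-ubi8" in full_notebook_name:
            return ["make validate-ubi8-datascience",
                    "papermill test: trustyai, ubi8, python-3.8"]
        return []  # pytorch-ubi8 and tensorflow-ubi8 are no longer handled here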
21 changes: 11 additions & 10 deletions jupyter/trustyai/ubi8-python-3.8/test/test_notebook.ipynb
@@ -13,7 +13,6 @@
  "from platform import python_version\n",
  "from trustyai.metrics.fairness.group import statistical_parity_difference\n",
  "from trustyai.model import output\n",
- "from trustyai.metrics.fairness.group import disparate_impact_ratio\n",
  "\n",
  "class TestTrustyaiNotebook(unittest.TestCase):\n",
  "\n",
@@ -28,7 +27,7 @@
  " self.assertEqual(actual_major_minor, expected_major_minor, \"incorrect version\")\n",
  " \n",
  " def test_fairnessmetrics(self):\n",
- " url_unbiased = \"https://raw.githubusercontent.com/dibryant/notebooks/trustyai/jupyter/trustyai/ubi8-python-3.8/test/income-unbiased.csv\"\n",
+ " url_unbiased = \"https://raw.githubusercontent.com/opendatahub-io/notebooks/main/jupyter/trustyai/ubi8-python-3.8/test/income-unbiased.csv\"\n",
  " nobias = pd.read_csv(url_unbiased, index_col=False)\n",
  " \n",
  " nobias = pd.read_csv(url_unbiased, index_col=False)\n",
@@ -43,23 +42,25 @@
  " favorable=[favorable])\n",
  " self.assertTrue(score >= 0.0036255104824703954) \n",
  " print(\"On the test_fairness_metrics test case the statistical_parity_difference score for this dataset is between the threshold [-0.1,0.1], which classifies the model as reasonably fair.\")\n",
  " \n",
- " def test_datafairness(self):\n",
- " url_biased = \"https://raw.githubusercontent.com/dibryant/notebooks/trustyai/jupyter/trustyai/ubi8-python-3.8/test/income-biased.csv\"\n",
- " bias = pd.read_csv(url_biased, index_col=False)\n",
- " \n",
+ " def test_datafairness(self):\n",
+ " url_biased = \"https://raw.githubusercontent.com/opendatahub-io/notebooks/main/jupyter/trustyai/ubi8-python-3.8/test/income-biased.csv\"\n",
+ " bias = pd.read_csv(url_biased, index_col=False)\n",
- " bias.groupby(['gender', 'income'])['income'].count()\n",
- " bias.groupby(['gender', 'income'])['income'].count().unstack().plot.bar()\n",
  " \n",
+ " # Perform the data manipulations \n",
+ " grouped_counts = bias.groupby(['gender', 'income'])['income'].count()\n",
+ " unstacked_counts = bias.groupby(['gender', 'income'])['income'].count().unstack()\n",
+ " unstacked_counts.plot.bar()\n",
+ "\n",
  " bias_privileged = bias[bias.gender == 1]\n",
  " bias_unprivileged = bias[bias.gender == 0]\n",
  " favorable = output(\"income\", dtype=\"number\", value=1)\n",
  " score = statistical_parity_difference(privileged=bias_privileged,\n",
  " unprivileged=bias_unprivileged,\n",
  " favorable=[favorable])\n",
- " self.assertTrue(score >= -0.15670061634672994) \n",
- " print(\"On the test_fairness_metrics test case the statistical_parity_difference score for this dataset is between the threshold [-0.1,0.1], which classifies the model as reasonably fair.\")\n",
+ " self.assertTrue(score <= -0.15670061634672994)\n",
+ " print(\"On the test_bias_metrics test case the statistical_parity_difference score for this dataset, as expected, is outside the threshold [-0.1,0.1], which classifies the model as unfair.\")\n",
  " \n",
  "\n",
  "suite = unittest.TestLoader().loadTestsFromTestCase(TestTrustyaiNotebook)\n",
  "unittest.TextTestRunner().run(suite)"
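The substantive fix in this file is the flipped assertion: income-biased.csv scores about -0.157 on statistical parity difference, which lies outside the [-0.1, 0.1] fairness band, so the test now asserts score <= -0.15670061634672994 and prints the "unfair" message instead of the "reasonably fair" one. A hedged by-hand sketch of the metric and the band check, assuming SPD = P(favorable | unprivileged) - P(favorable | privileged) over the gender/income columns used in the notebook (spd_by_hand and classify_spd are illustrative names, not trustyai API):

    import pandas as pd

    def spd_by_hand(df: pd.DataFrame) -> float:
        # SPD = P(income == 1 | gender == 0) - P(income == 1 | gender == 1),
        # assuming gender == 0 is the unprivileged group, as in the notebook.
        p_unprivileged = (df.loc[df.gender == 0, "income"] == 1).mean()
        p_privileged = (df.loc[df.gender == 1, "income"] == 1).mean()
        return p_unprivileged - p_privileged

    def classify_spd(score: float, band: float = 0.1) -> str:
        # Scores inside [-band, +band] are conventionally read as reasonably fair.
        return "reasonably fair" if -band <= score <= band else "unfair"

    print(classify_spd(0.0036255104824703954))  # unbiased dataset score -> reasonably fair
    print(classify_spd(-0.15670061634672994))   # biased dataset score -> unfair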
20 changes: 10 additions & 10 deletions jupyter/trustyai/ubi9-python-3.9/test/test_notebook.ipynb
@@ -13,7 +13,6 @@
  "from platform import python_version\n",
  "from trustyai.metrics.fairness.group import statistical_parity_difference\n",
  "from trustyai.model import output\n",
- "from trustyai.metrics.fairness.group import disparate_impact_ratio\n",
  "\n",
  "class TestTrustyaiNotebook(unittest.TestCase):\n",
  "\n",
@@ -28,7 +27,7 @@
  " self.assertEqual(actual_major_minor, expected_major_minor, \"incorrect version\")\n",
  "\n",
  " def test_fairnessmetrics(self):\n",
- " url_unbiased = \"https://raw.githubusercontent.com/dibryant/notebooks/trustyai/jupyter/trustyai/ubi9-python-3.9/test/income-unbiased.csv\"\n",
+ " url_unbiased = \"https://raw.githubusercontent.com/opendatahub-io/notebooks/main/jupyter/trustyai/ubi9-python-3.9/test/income-unbiased.csv\"\n",
  " nobias = pd.read_csv(url_unbiased, index_col=False)\n",
  " \n",
  " nobias = pd.read_csv(url_unbiased, index_col=False)\n",
@@ -45,24 +44,25 @@
  " print(\"On the test_fairness_metrics test case the statistical_parity_difference score for this dataset is between the threshold [-0.1,0.1], which classifies the model as reasonably fair.\")\n",
  " \n",
  " def test_datafairness(self):\n",
- " url_biased = \"https://raw.githubusercontent.com/dibryant/notebooks/trustyai/jupyter/trustyai/ubi9-python-3.9/test/income-biased.csv\"\n",
+ " url_biased = \"https://raw.githubusercontent.com/opendatahub-io/notebooks/main/jupyter/trustyai/ubi9-python-3.9/test/income-biased.csv\"\n",
  " bias = pd.read_csv(url_biased, index_col=False)\n",
- " \n",
- " bias = pd.read_csv(url_biased, index_col=False)\n",
- " bias.groupby(['gender', 'income'])['income'].count()\n",
- " bias.groupby(['gender', 'income'])['income'].count().unstack().plot.bar()\n",
  " \n",
+ " # Perform the data manipulations \n",
+ " grouped_counts = bias.groupby(['gender', 'income'])['income'].count()\n",
+ " unstacked_counts = bias.groupby(['gender', 'income'])['income'].count().unstack()\n",
+ " unstacked_counts.plot.bar()\n",
+ "\n",
  " bias_privileged = bias[bias.gender == 1]\n",
  " bias_unprivileged = bias[bias.gender == 0]\n",
  " favorable = output(\"income\", dtype=\"number\", value=1)\n",
  " score = statistical_parity_difference(privileged=bias_privileged,\n",
  " unprivileged=bias_unprivileged,\n",
  " favorable=[favorable])\n",
- " self.assertTrue(score >= -0.15670061634672994) \n",
- " print(\"On the test_fairness_metrics test case the statistical_parity_difference score for this dataset is between the threshold [-0.1,0.1], which classifies the model as reasonably fair.\")\n",
+ " self.assertTrue(score <= -0.15670061634672994)\n",
+ " print(\"On the test_bias_metrics test case the statistical_parity_difference score for this dataset, as expected, is outside the threshold [-0.1,0.1], which classifies the model as unfair.\")\n",
  " \n",
  "suite = unittest.TestLoader().loadTestsFromTestCase(TestTrustyaiNotebook)\n",
- "unittest.TextTestRunner().run(suite)\n"
+ "unittest.TextTestRunner().run(suite)"
  ]
  }
  ],
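Both notebooks also receive the same refactor of the counting cell: the chained groupby calls are split into named intermediates before plotting. A self-contained sketch of what that cell computes, on a made-up stand-in for income-biased.csv (the frame and its values are invented for illustration; the plot call needs matplotlib):

    import pandas as pd

    # Tiny made-up frame, just to show the shapes involved.
    bias = pd.DataFrame({"gender": [0, 0, 1, 1, 1],
                         "income": [0, 1, 0, 1, 1]})

    grouped_counts = bias.groupby(['gender', 'income'])['income'].count()
    unstacked_counts = bias.groupby(['gender', 'income'])['income'].count().unstack()
    print(unstacked_counts)      # rows: gender, columns: income, values: counts
    unstacked_counts.plot.bar()  # grouped bar chart of income counts per gender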
