Skip to content

Ci lint #390

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 5 commits into from
Aug 4, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 0 additions & 1 deletion .github/actions/setup-env/action.yml
Original file line number Diff line number Diff line change
Expand Up @@ -25,4 +25,3 @@ runs:
run: |
python -m pip install --upgrade pip
pip install ${{ inputs.package-root-dir }}[test]

21 changes: 21 additions & 0 deletions .github/workflows/ci-lint.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
name: pre-commit

on:
pull_request:
push:
branches: [master]

jobs:
pre-commit:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4.1.7
with:
# Ensure the full history is fetched
# This is required to run pre-commit on a specific set of commits
# TODO: Remove this when all the pre-commit issues are fixed
fetch-depth: 0
- uses: actions/setup-python@v5.1.1
with:
python-version: 3.13
- uses: pre-commit/action@v3.0.1
39 changes: 39 additions & 0 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
# pre-commit is a tool to perform a predefined set of tasks manually and/or
# automatically before git commits are made.
#
# Config reference: https://pre-commit.com/#pre-commit-configyaml---top-level
#
# Common tasks
#
# - Register git hooks: pre-commit install --install-hooks
# - Run on all files: pre-commit run --all-files
#
# These pre-commit hooks are run as CI.
#
# NOTE: if it can be avoided, add configs/args in pyproject.toml or below instead of creating a new `.config.file`.
# https://pre-commit.ci/#configuration
ci:
autoupdate_schedule: monthly
autofix_commit_msg: |
[pre-commit.ci] Apply automatic pre-commit fixes

repos:
# general
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.6.0
hooks:
- id: end-of-file-fixer
exclude: '\.svg$'
- id: trailing-whitespace
exclude: '\.svg$'
- id: check-json
- id: check-yaml
args: [--allow-multiple-documents, --unsafe]
- id: check-toml

- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.5.6
hooks:
- id: ruff
args: ["--fix"]
- id: ruff-format
2 changes: 1 addition & 1 deletion docs/index.md
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,7 @@ options {
- ![Fairness Indicators on the TensorFlow blog](images/tf_full_color_primary_icon.svg)

### [Fairness Indicators on the TensorFlow blog](https://blog.tensorflow.org/2019/12/fairness-indicators-fair-ML-systems.html)

---

[Read on the TensorFlow blog](https://blog.tensorflow.org/2019/12/fairness-indicators-fair-ML-systems.html)
Expand Down
2 changes: 1 addition & 1 deletion docs/javascripts/mathjax.js
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ window.MathJax = {
}
};

document$.subscribe(() => {
document$.subscribe(() => {
MathJax.startup.output.clearCache()
MathJax.typesetClear()
MathJax.texReset()
Expand Down
1 change: 0 additions & 1 deletion docs/tutorials/_toc.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -13,4 +13,3 @@ toc:
path: /responsible_ai/fairness_indicators/tutorials/Fairness_Indicators_Pandas_Case_Study
- title: FaceSSD example Colab
path: /responsible_ai/fairness_indicators/tutorials/Facessd_Fairness_Indicators_Example_Colab

159 changes: 80 additions & 79 deletions fairness_indicators/example_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,15 +21,15 @@

from typing import Any

from fairness_indicators import fairness_indicators_metrics # pylint: disable=unused-import
from tensorflow import keras
import tensorflow.compat.v1 as tf
import tensorflow_model_analysis as tfma
from tensorflow import keras

from fairness_indicators import fairness_indicators_metrics # noqa: F401

TEXT_FEATURE = 'comment_text'
LABEL = 'toxicity'
SLICE = 'slice'
TEXT_FEATURE = "comment_text"
LABEL = "toxicity"
SLICE = "slice"
FEATURE_MAP = {
LABEL: tf.io.FixedLenFeature([], tf.float32),
TEXT_FEATURE: tf.io.FixedLenFeature([], tf.string),
Expand All @@ -38,74 +38,75 @@


class ExampleParser(keras.layers.Layer):
"""A Keras layer that parses the tf.Example."""
"""A Keras layer that parses the tf.Example."""

def __init__(self, input_feature_key):
self._input_feature_key = input_feature_key
self.input_spec = keras.layers.InputSpec(shape=(1,), dtype=tf.string)
super().__init__()

def __init__(self, input_feature_key):
self._input_feature_key = input_feature_key
self.input_spec = keras.layers.InputSpec(shape=(1,), dtype=tf.string)
super().__init__()
def compute_output_shape(self, input_shape: Any):
return [1, 1]

def compute_output_shape(self, input_shape: Any):
return [1, 1]
def call(self, serialized_examples):
def get_feature(serialized_example):
parsed_example = tf.io.parse_single_example(
serialized_example, features=FEATURE_MAP
)
return parsed_example[self._input_feature_key]

def call(self, serialized_examples):
def get_feature(serialized_example):
parsed_example = tf.io.parse_single_example(
serialized_example, features=FEATURE_MAP
)
return parsed_example[self._input_feature_key]
serialized_examples = tf.cast(serialized_examples, tf.string)
return tf.map_fn(get_feature, serialized_examples)
serialized_examples = tf.cast(serialized_examples, tf.string)
return tf.map_fn(get_feature, serialized_examples)


class Reshaper(keras.layers.Layer):
"""A Keras layer that reshapes the input."""
"""A Keras layer that reshapes the input."""

def call(self, inputs):
return tf.reshape(inputs, (1, 32))
def call(self, inputs):
return tf.reshape(inputs, (1, 32))


class Caster(keras.layers.Layer):
"""A Keras layer that reshapes the input."""
"""A Keras layer that reshapes the input."""

def call(self, inputs):
return tf.cast(inputs, tf.float32)
def call(self, inputs):
return tf.cast(inputs, tf.float32)


def get_example_model(input_feature_key: str):
"""Returns a Keras model for testing."""
parser = ExampleParser(input_feature_key)
text_vectorization = keras.layers.TextVectorization(
max_tokens=32,
output_mode='int',
output_sequence_length=32,
)
text_vectorization.adapt(
['nontoxic', 'toxic comment', 'test comment', 'abc', 'abcdef', 'random']
)
dense1 = keras.layers.Dense(
32,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
)
dense2 = keras.layers.Dense(
1,
activation=None,
use_bias=False,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
)

inputs = tf.keras.Input(shape=(), dtype=tf.string)
parsed_example = parser(inputs)
text_vector = text_vectorization(parsed_example)
text_vector = Reshaper()(text_vector)
text_vector = Caster()(text_vector)
output1 = dense1(text_vector)
output2 = dense2(output1)
return tf.keras.Model(inputs=inputs, outputs=output2)
"""Returns a Keras model for testing."""
parser = ExampleParser(input_feature_key)
text_vectorization = keras.layers.TextVectorization(
max_tokens=32,
output_mode="int",
output_sequence_length=32,
)
text_vectorization.adapt(
["nontoxic", "toxic comment", "test comment", "abc", "abcdef", "random"]
)
dense1 = keras.layers.Dense(
32,
activation=None,
use_bias=True,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
)
dense2 = keras.layers.Dense(
1,
activation=None,
use_bias=False,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
)

inputs = tf.keras.Input(shape=(), dtype=tf.string)
parsed_example = parser(inputs)
text_vector = text_vectorization(parsed_example)
text_vector = Reshaper()(text_vector)
text_vector = Caster()(text_vector)
output1 = dense1(text_vector)
output2 = dense2(output1)
return tf.keras.Model(inputs=inputs, outputs=output2)


def evaluate_model(
Expand All @@ -114,23 +115,23 @@ def evaluate_model(
tfma_eval_result_path,
eval_config,
):
"""Evaluate Model using Tensorflow Model Analysis.

Args:
classifier_model_path: Trained classifier model to be evaluated.
validate_tf_file_path: File containing validation TFRecordDataset.
tfma_eval_result_path: Path to export tfma-related eval path.
eval_config: tfma eval_config.
"""

eval_shared_model = tfma.default_eval_shared_model(
eval_saved_model_path=classifier_model_path, eval_config=eval_config
)

# Run the fairness evaluation.
tfma.run_model_analysis(
eval_shared_model=eval_shared_model,
data_location=validate_tf_file_path,
output_path=tfma_eval_result_path,
eval_config=eval_config,
)
"""Evaluate Model using Tensorflow Model Analysis.

Args:
----
classifier_model_path: Trained classifier model to be evaluated.
validate_tf_file_path: File containing validation TFRecordDataset.
tfma_eval_result_path: Path to export tfma-related eval path.
eval_config: tfma eval_config.
"""
eval_shared_model = tfma.default_eval_shared_model(
eval_saved_model_path=classifier_model_path, eval_config=eval_config
)

# Run the fairness evaluation.
tfma.run_model_analysis(
eval_shared_model=eval_shared_model,
data_location=validate_tf_file_path,
output_path=tfma_eval_result_path,
eval_config=eval_config,
)
Loading