
[Feature] Add free threading support #3437

Workflow file for this run

name: Continuous Benchmark (PR)
on:
pull_request:
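# write-all is broad; the PR-commenting step at the end likely only needs pull-requests/issues write access.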
permissions: write-all
concurrency:
  # Documentation suggests ${{ github.head_ref }}, but that is only set on pull_request/pull_request_target triggers, so we use ${{ github.ref }} instead.
  # On main, we want every build to run to completion even when merges land quickly, so it is easy to pinpoint the commit that broke something.
group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/main' && format('ci-master-{0}', github.sha) || format('ci-{0}', github.ref) }}
cancel-in-progress: true
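  # Superseded runs on the same PR ref are cancelled; on main the group is keyed by SHA, so every build completes (see above).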
jobs:
benchmark:
name: ${{ matrix.device }} Pytest benchmark
runs-on: linux.g5.4xlarge.nvidia.gpu
strategy:
matrix:
device: [CPU, GPU]
python-version: ['3.12']
defaults:
run:
shell: bash -l {0}
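        # a login shell, so PATH changes from profile files apply in every step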
container:
image: nvidia/cuda:12.3.0-runtime-ubuntu22.04
options: --gpus all
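      # every GPU is visible inside the container; the CPU matrix leg masks them later via CUDA_VISIBLE_DEVICES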
steps:
- name: Who triggered this?
run: |
echo "Action triggered by ${{ github.event.pull_request.html_url }}"
- name: Install deps
run: |
export TZ=Europe/London
          export DEBIAN_FRONTEND=noninteractive # tzdata would otherwise prompt interactively and hang the install
          apt-get update -y
apt-get upgrade -y
apt-get -y install gcc curl g++ unzip wget sudo git cmake
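      # the glibc version is what the prebuilt PyTorch nightly wheels link against; logged here, presumably to ease debugging wheel-compatibility issues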
- name: Check ldd --version
run: ldd --version
- name: Checkout
uses: actions/checkout@v3
with:
          fetch-depth: 50 # fetch enough history to include the PR's base commit for the baseline run
- name: Python Setup
uses: actions/setup-python@v4
with:
          python-version: ${{ matrix.python-version }}
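      # the checkout is owned by a different user inside the container, so git has to trust the directory explicitly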
- name: Setup git
run: git config --global --add safe.directory /__w/tensordict/tensordict
      - name: Setup PATH
run: |
echo /usr/local/bin >> $GITHUB_PATH
- name: Setup Environment
run: |
set -e
set -x
curl -LsSf https://astral.sh/uv/install.sh | sh
echo "$HOME/.local/bin" >> $GITHUB_PATH
source $HOME/.local/bin/env
# Create a local venv for the benchmark run
uv venv .venv/local --python ${{ matrix.python-version }}
source .venv/local/bin/activate
echo "=== uv version ==="
uv --version
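          # the cu126 nightly wheels bundle their own CUDA runtime, so they should work on this CUDA 12.3 image as long as the host driver is recent enough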
uv pip install --pre torch torchvision --index-url https://download.pytorch.org/whl/nightly/cu126
uv pip install "pybind11[global]" "setuptools" "wheel" "ninja"
uv pip install pytest pytest-benchmark
uv pip install -e . --no-deps
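          # editable install of tensordict itself; --no-deps keeps the pinned nightly torch from being replaced by a stable release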
          # `export` here would not survive this step; write to $GITHUB_ENV instead so the
          # CPU job also hides the GPUs during the benchmark step below.
          ${{ matrix.device == 'CPU' && 'echo "CUDA_VISIBLE_DEVICES=" >> $GITHUB_ENV' || '' }}
- name: check GPU presence
if: matrix.device == 'GPU'
run: |
source .venv/local/bin/activate
          uv run --active python <<'EOF'
          import torch
          print(f"CUDA available: {torch.cuda.is_available()}")
          print(f"CUDA device count: {torch.cuda.device_count()}")
          print(f"CUDA version: {torch.version.cuda}")
          assert torch.cuda.is_available() and torch.cuda.device_count() > 0, "CUDA is not available or no GPU devices found"
          EOF
- name: Setup benchmarks
run: |
source .venv/local/bin/activate
echo "BASE_SHA=$(echo ${{ github.event.pull_request.base.sha }} | cut -c1-8)" >> $GITHUB_ENV
echo "HEAD_SHA=$(echo ${{ github.event.pull_request.head.sha }} | cut -c1-8)" >> $GITHUB_ENV
echo "BASELINE_JSON=$(mktemp)" >> $GITHUB_ENV
echo "CONTENDER_JSON=$(mktemp)" >> $GITHUB_ENV
echo "PR_COMMENT=$(mktemp)" >> $GITHUB_ENV
- name: Run benchmarks
run: |
set -e
set -x
source .venv/local/bin/activate
cd benchmarks/
# Check Python version to ensure compatibility with PyTorch Dynamo
uv run --active python --version
uv run --active python -c "import sys; actual_version = f'{sys.version_info.major}.{sys.version_info.minor}'; expected_version = '${{ matrix.python-version }}'; print(f'Expected: {expected_version}, Actual: {actual_version}'); exit(1) if actual_version != expected_version else None; exit(1) if sys.version_info >= (3, 13) else print(f'Python version check passed: {actual_version}')"
export TORCHDYNAMO_INLINE_INBUILT_NN_MODULES=1
export TD_GET_DEFAULTS_TO_NONE=1
RUN_BENCHMARK="uv run --active pytest -vvv --rank 0 --benchmark-json "
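          # run the suite twice: on the PR base commit (baseline), then on the PR head (contender)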
git checkout ${{ github.event.pull_request.base.sha }}
$RUN_BENCHMARK ${{ env.BASELINE_JSON }}
git checkout ${{ github.event.pull_request.head.sha }}
$RUN_BENCHMARK ${{ env.CONTENDER_JSON }}
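      # the commenter diffs contender vs baseline on ops/s (higher is better); threshold 5 presumably flags changes beyond 5%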
- name: Publish results
uses: apbard/pytest-benchmark-commenter@v3
with:
token: ${{ secrets.GITHUB_TOKEN }}
benchmark-file: ${{ env.CONTENDER_JSON }}
comparison-benchmark-file: ${{ env.BASELINE_JSON }}
benchmark-metrics: 'name,max,mean,ops'
comparison-benchmark-metric: 'ops'
comparison-higher-is-better: true
comparison-threshold: 5
benchmark-title: 'Result of ${{ matrix.device }} Benchmark Tests'