# This workflow will:
# - Create a new GitHub release
# - Build wheels for supported architectures
# - Deploy the wheels to the GitHub release
# - Release the source distribution to PyPI
# For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries

name: Build wheels and deploy

on:
  create:
    tags:
      - v*

jobs:

  setup_release:
    name: Create Release
    runs-on: ubuntu-latest
    steps:
      - name: Get the tag version
        id: extract_branch
        run: echo ::set-output name=branch::${GITHUB_REF#refs/tags/}
        shell: bash
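        # Note: the `::set-output` workflow command is deprecated on current runners; if it
        # stops working, an equivalent (untested here) would be:
        #   run: echo "branch=${GITHUB_REF#refs/tags/}" >> "$GITHUB_OUTPUT"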

      - name: Create Release
        id: create_release
        uses: actions/create-release@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          tag_name: ${{ steps.extract_branch.outputs.branch }}
          release_name: ${{ steps.extract_branch.outputs.branch }}

  build_wheels:
    name: Build Wheel
    needs: setup_release
    runs-on: ${{ matrix.os }}

    strategy:
      fail-fast: false
      matrix:
        # Using ubuntu-20.04 instead of 22.04 for broader glibc compatibility. Ideally we'd use the
        # manylinux docker image, but I haven't figured out how to install CUDA on manylinux.
        os: [ubuntu-20.04]
        python-version: ['3.7', '3.8', '3.9', '3.10', '3.11']
        torch-version: ['1.12.1', '1.13.1', '2.0.1', '2.1.1', '2.2.0.dev20231127']
        cuda-version: ['11.8.0', '12.2.0']
        # We need separate wheels built with and without the C++11 ABI (-D_GLIBCXX_USE_CXX11_ABI).
        # PyTorch wheels currently don't use it, but nvcr images ship PyTorch compiled with the C++11 ABI.
        # Without both variants we get an import error (undefined symbol: _ZN3c105ErrorC2ENS_14SourceLocationESs)
        # when a wheel built without the C++11 ABI is used on nvcr images.
        cxx11_abi: ['FALSE', 'TRUE']
        exclude:
          # PyTorch <= 1.12 does not support Python 3.11
          - torch-version: '1.12.1'
            python-version: '3.11'
          # PyTorch >= 2.0 only supports Python >= 3.8
          - torch-version: '2.0.1'
            python-version: '3.7'
          - torch-version: '2.1.1'
            python-version: '3.7'
          - torch-version: '2.2.0.dev20231127'
            python-version: '3.7'
          # PyTorch <= 2.0 only supports CUDA <= 11.8
          - torch-version: '1.12.1'
            cuda-version: '12.2.0'
          - torch-version: '1.13.1'
            cuda-version: '12.2.0'
          - torch-version: '2.0.1'
            cuda-version: '12.2.0'
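          # For illustration: each remaining combination builds one wheel, e.g.
          # (python 3.10, torch 2.1.1, cuda 12.2.0, cxx11_abi TRUE) runs, while anything
          # pairing torch 1.12.1/1.13.1/2.0.1 with CUDA 12.2.0 is skipped.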

    steps:
      - name: Checkout
        uses: actions/checkout@v3

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}

      - name: Set CUDA and PyTorch versions
        run: |
          echo "MATRIX_CUDA_VERSION=$(echo ${{ matrix.cuda-version }} | awk -F \. {'print $1 $2'})" >> $GITHUB_ENV
          echo "MATRIX_TORCH_VERSION=$(echo ${{ matrix.torch-version }} | awk -F \. {'print $1 "." $2'})" >> $GITHUB_ENV
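          # e.g. cuda-version '11.8.0' gives MATRIX_CUDA_VERSION=118, and
          # torch-version '2.1.1' gives MATRIX_TORCH_VERSION=2.1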

      - name: Free up disk space
        if: ${{ runner.os == 'Linux' }}
        # https://github.com/easimon/maximize-build-space/blob/master/action.yml
        # https://github.com/easimon/maximize-build-space/tree/test-report
        run: |
          sudo rm -rf /usr/share/dotnet
          sudo rm -rf /opt/ghc
          sudo rm -rf /opt/hostedtoolcache/CodeQL

      - name: Set up swap space
        if: runner.os == 'Linux'
        uses: pierotofy/set-swap-space@v1.0
        with:
          swap-size-gb: 10

      - name: Install CUDA ${{ matrix.cuda-version }}
        if: ${{ matrix.cuda-version != 'cpu' }}
        uses: Jimver/cuda-toolkit@v0.2.11
        id: cuda-toolkit
        with:
          cuda: ${{ matrix.cuda-version }}
          linux-local-args: '["--toolkit"]'
          # The default method is "local", but we're hitting a caching error with it for CUDA 11.8 and 12.1
          # method: ${{ (matrix.cuda-version == '11.8.0' || matrix.cuda-version == '12.1.0') && 'network' || 'local' }}
          method: 'network'
          # We need the CUDA libraries (e.g. cuSPARSE, cuSOLVER) for compiling PyTorch extensions,
          # not just nvcc
          # sub-packages: '["nvcc"]'

      - name: Install PyTorch ${{ matrix.torch-version }}+cu${{ matrix.cuda-version }}
        run: |
          pip install --upgrade pip
          # If we don't install lit before installing PyTorch, we get this error for torch 2.0.1:
          # ERROR: Could not find a version that satisfies the requirement setuptools>=40.8.0 (from versions: none)
          pip install lit
          # We need to figure out which CUDA build of PyTorch to download:
          # e.g. the system CUDA version may be 11.7, but torch==1.12 wheels are only published up to cu116.
          # This code is ugly, maybe there's a better way to do this.
          export TORCH_CUDA_VERSION=$(python -c "import os; minv = {'1.12': 113, '1.13': 116, '2.0': 117, '2.1': 118, '2.2': 118}[os.environ['MATRIX_TORCH_VERSION']]; maxv = {'1.12': 116, '1.13': 117, '2.0': 118, '2.1': 121, '2.2': 121}[os.environ['MATRIX_TORCH_VERSION']]; print(max(min(int(os.environ['MATRIX_CUDA_VERSION']), maxv), minv))")
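          # Worked example: with MATRIX_TORCH_VERSION=2.1 and MATRIX_CUDA_VERSION=122, the bounds
          # are minv=118 and maxv=121, so TORCH_CUDA_VERSION=121 and the wheel index below
          # resolves to https://download.pytorch.org/whl/cu121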
          if [[ ${{ matrix.torch-version }} == *"dev"* ]]; then
            pip install --no-cache-dir --pre torch==${{ matrix.torch-version }} --index-url https://download.pytorch.org/whl/nightly/cu${TORCH_CUDA_VERSION}
          else
            pip install --no-cache-dir torch==${{ matrix.torch-version }} --index-url https://download.pytorch.org/whl/cu${TORCH_CUDA_VERSION}
          fi
          nvcc --version
          python --version
          python -c "import torch; print('PyTorch:', torch.__version__)"
          python -c "import torch; print('CUDA:', torch.version.cuda)"
          python -c "from torch.utils import cpp_extension; print(cpp_extension.CUDA_HOME)"
        shell: bash

      - name: Build wheel
        run: |
          # We want setuptools >= 49.6.0, otherwise we can't compile the extension if the system CUDA version is 11.7 and the PyTorch CUDA version is 11.6
          # https://github.com/pytorch/pytorch/blob/664058fa83f1d8eede5d66418abff6e20bd76ca8/torch/utils/cpp_extension.py#L810
          # However this still fails, so I'm pinning a newer version of setuptools
          pip install setuptools==68.0.0
          pip install ninja packaging wheel
          export PATH=/usr/local/nvidia/bin:/usr/local/nvidia/lib64:$PATH
          export LD_LIBRARY_PATH=/usr/local/nvidia/lib64:/usr/local/cuda/lib64:$LD_LIBRARY_PATH
          # Limit MAX_JOBS, otherwise the GitHub runner goes OOM
          MAX_JOBS=2 MAMBA_FORCE_BUILD="TRUE" MAMBA_FORCE_CXX11_ABI=${{ matrix.cxx11_abi }} python setup.py bdist_wheel --dist-dir=dist
          tmpname=cu${MATRIX_CUDA_VERSION}torch${MATRIX_TORCH_VERSION}cxx11abi${{ matrix.cxx11_abi }}
          wheel_name=$(ls dist/*whl | xargs -n 1 basename | sed "s/-/+$tmpname-/2")
          ls dist/*whl | xargs -I {} mv {} dist/${wheel_name}
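          # For example (hypothetical filename), with MATRIX_CUDA_VERSION=118, MATRIX_TORCH_VERSION=2.1
          # and cxx11_abi=FALSE, the sed above renames pkg-1.0.1-cp310-cp310-linux_x86_64.whl to
          # pkg-1.0.1+cu118torch2.1cxx11abiFALSE-cp310-cp310-linux_x86_64.whl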
          echo "wheel_name=${wheel_name}" >> $GITHUB_ENV

      - name: Log Built Wheels
        run: |
          ls dist

      - name: Get the tag version
        id: extract_branch
        run: echo ::set-output name=branch::${GITHUB_REF#refs/tags/}

      - name: Get Release with tag
        id: get_current_release
        uses: joutvhu/get-release@v1
        with:
          tag_name: ${{ steps.extract_branch.outputs.branch }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      - name: Upload Release Asset
        id: upload_release_asset
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          upload_url: ${{ steps.get_current_release.outputs.upload_url }}
          asset_path: ./dist/${{ env.wheel_name }}
          asset_name: ${{ env.wheel_name }}
          asset_content_type: application/*
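      # actions/upload-release-asset is archived; a rough sketch of an equivalent step using the
      # GitHub CLI preinstalled on hosted runners (not used by this workflow) would be:
      #   - name: Upload Release Asset
      #     env:
      #       GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      #     run: gh release upload ${{ steps.extract_branch.outputs.branch }} dist/${{ env.wheel_name }}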

  publish_package:
    name: Publish package
    needs: [build_wheels]

    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3

      - uses: actions/setup-python@v4
        with:
          python-version: '3.10'

      - name: Install dependencies
        run: |
          pip install ninja packaging setuptools wheel twine
          # We don't want to download anything CUDA-related here
          pip install torch --index-url https://download.pytorch.org/whl/cpu

      - name: Build core package
        env:
          MAMBA_SKIP_CUDA_BUILD: "TRUE"
        run: |
          python setup.py sdist --dist-dir=dist
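          # MAMBA_SKIP_CUDA_BUILD presumably tells setup.py to skip compiling the CUDA extension,
          # so the sdist ships source only (assumption based on the variable name).
          # A quick sanity check of the archive contents could be:
          #   tar tzf dist/*.tar.gz | head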

      - name: Deploy
        env:
          TWINE_USERNAME: "__token__"
          TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }}
        run: |
          python -m twine upload dist/*
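          # To rehearse a release against TestPyPI instead (requires a TestPyPI token),
          # the upload command would be:
          #   python -m twine upload --repository-url https://test.pypi.org/legacy/ dist/*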