# #############################################################################
# All default build options below. These apply to all build commands.
# #############################################################################
# Make Bazel print out all options from rc files.
build --announce_rc
# By default, execute all actions locally.
build --spawn_strategy=local
# Enable host OS specific configs. For instance, "build:linux" will be used
# automatically when building on Linux.
build --enable_platform_specific_config
build --experimental_cc_shared_library
# Do not use C-Ares when building gRPC.
build --define=grpc_no_ares=true
build --define=tsl_link_protobuf=true
# Enable optimization.
build -c opt
# Suppress all warning messages.
build --output_filter=DONT_MATCH_ANYTHING
build --copt=-DMLIR_PYTHON_PACKAGE_PREFIX=jaxlib.mlir.
# #############################################################################
# Platform Specific configs below. These are automatically picked up by Bazel
# depending on the platform that is running the build.
# #############################################################################
build:linux --config=posix
build:linux --copt=-Wno-unknown-warning-option
# Workaround for gcc 10+ warnings related to upb.
# See https://github.com/tensorflow/tensorflow/issues/39467
build:linux --copt=-Wno-stringop-truncation
build:linux --copt=-Wno-array-parameter
build:macos --config=posix
build:macos --apple_platform_type=macos
# Windows has a relatively short command line limit, which JAX has begun to hit.
# See https://docs.bazel.build/versions/main/windows.html
build:windows --features=compiler_param_file
build:windows --features=archive_param_file
# XLA uses M_* math constants that only get defined by MSVC headers if
# _USE_MATH_DEFINES is defined.
build:windows --copt=/D_USE_MATH_DEFINES
build:windows --host_copt=/D_USE_MATH_DEFINES
# Make sure to include as little of windows.h as possible
build:windows --copt=-DWIN32_LEAN_AND_MEAN
build:windows --host_copt=-DWIN32_LEAN_AND_MEAN
build:windows --copt=-DNOGDI
build:windows --host_copt=-DNOGDI
# https://devblogs.microsoft.com/cppblog/announcing-full-support-for-a-c-c-conformant-preprocessor-in-msvc/
# otherwise, there will be some compiling error due to preprocessing.
build:windows --copt=/Zc:preprocessor
build:windows --cxxopt=/std:c++17
build:windows --host_cxxopt=/std:c++17
# Generate PDB files. To generate useful PDBs in the opt compilation_mode,
# --copt /Z7 is needed.
build:windows --linkopt=/DEBUG
build:windows --host_linkopt=/DEBUG
build:windows --linkopt=/OPT:REF
build:windows --host_linkopt=/OPT:REF
build:windows --linkopt=/OPT:ICF
build:windows --host_linkopt=/OPT:ICF
build:windows --incompatible_strict_action_env=true
# #############################################################################
# Feature-specific configurations. These are used by the CI configs below
# depending on the type of build. E.g. `ci_linux_x86_64` inherits the Linux x86
# configs such as `avx_linux` and `mkl_open_source_only`, `ci_linux_x86_64_cuda`
# inherits `cuda` and `build_cuda_with_nvcc`, etc.
# #############################################################################
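# As a rough illustration (not used by CI itself), a developer could compose
# the same feature configs by hand on the command line, e.g.:
#   bazel build //... --config=avx_linux --config=mkl_open_source_only
# The `//...` target pattern here is just a placeholder.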
build:nonccl --define=no_nccl_support=true
build:posix --copt=-fvisibility=hidden
build:posix --copt=-Wno-sign-compare
build:posix --cxxopt=-std=c++17
build:posix --host_cxxopt=-std=c++17
build:avx_posix --copt=-mavx
build:avx_posix --host_copt=-mavx
build:native_arch_posix --copt=-march=native
build:native_arch_posix --host_copt=-march=native
build:avx_linux --copt=-mavx
build:avx_linux --host_copt=-mavx
build:avx_windows --copt=/arch:AVX
build:mkl_open_source_only --define=tensorflow_mkldnn_contraction_kernel=1
# Config setting to build oneDNN with Compute Library for the Arm Architecture (ACL).
build:mkl_aarch64_threadpool --define=build_with_mkl_aarch64=true
build:mkl_aarch64_threadpool --@compute_library//:openmp=false
build:mkl_aarch64_threadpool -c opt
# Disable the clang extension that rejects type definitions within offsetof.
# This was added in clang-16 by https://reviews.llvm.org/D133574.
# Can be removed once upb is updated, since a type definition is used within
# offsetof in the current version of upb.
# See https://github.com/protocolbuffers/upb/blob/9effcbcb27f0a665f9f345030188c0b291e32482/upb/upb.c#L183.
build:clang --copt=-Wno-gnu-offsetof-extensions
# Disable the clang extension that rejects unknown arguments.
build:clang --copt=-Qunused-arguments
# Error on struct/class mismatches, since this causes link failures on Windows.
build:clang --copt=-Werror=mismatched-tags
# Configs for CUDA
build:cuda --repo_env TF_NEED_CUDA=1
build:cuda --repo_env TF_NCCL_USE_STUB=1
# "sm" means we emit only cubin, which is forward compatible within a GPU generation.
# "compute" means we emit both cubin and PTX, which is larger but also forward compatible to future GPU generations.
build:cuda --repo_env HERMETIC_CUDA_COMPUTE_CAPABILITIES="sm_50,sm_60,sm_70,sm_80,compute_90"
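# As an illustrative (hypothetical) override, a narrower capability list can be
# passed on the command line, e.g. to target a single GPU generation:
#   bazel build //... --config=cuda \
#     --repo_env=HERMETIC_CUDA_COMPUTE_CAPABILITIES="sm_80,compute_80"
# The "sm_80,compute_80" value and the `//...` target pattern are examples only.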
build:cuda --crosstool_top=@local_config_cuda//crosstool:toolchain
build:cuda --@local_config_cuda//:enable_cuda
build:cuda --@xla//xla/python:jax_cuda_pip_rpaths=true
# Default hermetic CUDA and CUDNN versions.
build:cuda --repo_env=HERMETIC_CUDA_VERSION="12.3.2"
build:cuda --repo_env=HERMETIC_CUDNN_VERSION="9.1.1"
# This flag is needed to include CUDA libraries for bazel tests.
test:cuda --@local_config_cuda//cuda:include_cuda_libs=true
# Force the linker to set RPATH, not RUNPATH. When resolving dynamic libraries,
# ld.so prefers in order: RPATH, LD_LIBRARY_PATH, RUNPATH. JAX sets RPATH to
# point to the $ORIGIN-relative location of the pip-installed NVIDIA CUDA
# packages.
# This has pros and cons:
# * pro: we'll ignore other CUDA installations, which has frequently confused
# users in the past. By setting RPATH, we'll always use the NVIDIA pip
# packages if they are installed.
# * con: the user cannot override the CUDA installation location
# via LD_LIBRARY_PATH, if the nvidia-... pip packages are installed. This is
# acceptable, because the workaround is to remove the "nvidia-..." pip packages.
# The list of CUDA pip packages that JAX depends on are present in setup.py.
build:cuda --linkopt=-Wl,--disable-new-dtags
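# One way to verify the effect (illustrative, not part of the build) is to
# inspect the dynamic section of a built shared library and confirm it carries
# an RPATH entry rather than a RUNPATH entry:
#   readelf -d <path/to/built/library.so> | grep -E 'RPATH|RUNPATH'
# The library path above is a placeholder for whatever artifact was built.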
# Build CUDA and other C++ targets with Clang
build:build_cuda_with_clang --@local_config_cuda//:cuda_compiler=clang
# Build CUDA with NVCC and other C++ targets with Clang
build:build_cuda_with_nvcc --action_env=TF_NVCC_CLANG="1"
build:build_cuda_with_nvcc --@local_config_cuda//:cuda_compiler=nvcc
# Requires MSVC and LLVM to be installed
build:win_clang --extra_toolchains=@local_config_cc//:cc-toolchain-x64_windows-clang-cl
build:win_clang --extra_execution_platforms=//jax/tools/toolchains:x64_windows-clang-cl
build:win_clang --compiler=clang-cl
build:rocm_base --crosstool_top=@local_config_rocm//crosstool:toolchain
build:rocm_base --define=using_rocm=true --define=using_rocm_hipcc=true
build:rocm_base --repo_env TF_NEED_ROCM=1
build:rocm_base --action_env TF_ROCM_AMDGPU_TARGETS="gfx900,gfx906,gfx908,gfx90a,gfx940,gfx941,gfx942,gfx1030,gfx1100"
# Build with hipcc for ROCm and clang for the host.
build:rocm --config=rocm_base
build:rocm --action_env=TF_ROCM_CLANG="1"
build:rocm --action_env=CLANG_COMPILER_PATH="/usr/lib/llvm-18/bin/clang"
build:rocm --copt=-Wno-gnu-offsetof-extensions
build:rocm --copt=-Qunused-arguments
build:rocm --action_env=TF_HIPCC_CLANG="1"
# #############################################################################
# Cache options below.
# #############################################################################
# Public read-only cache for Mac builds. JAX uses a GCS bucket to store cache
# from JAX's Mac CI build. By applying --config=macos_cache, any local Mac build
# should be able to read from this cache and potentially see a speedup. The
# "oct2023" in the URL is just the date when the bucket was created and can be
# disregarded. It still contains the latest cache that is being used.
build:macos_cache --remote_cache="https://storage.googleapis.com/tensorflow-macos-bazel-cache/oct2023" --remote_upload_local_results=false
# Cache pushes are limited to JAX's CI system.
build:macos_cache_push --config=macos_cache --remote_upload_local_results=true --google_default_credentials
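# For example (illustrative), a local Mac build can opt into the read-only
# cache with:
#   bazel build //... --config=macos_cache
# where `//...` stands in for whatever targets you are building.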
# #############################################################################
# CI Build config options below.
# JAX uses these configs in CI builds for building artifacts and when running
# Bazel tests.
# #############################################################################
# Linux x86 CI configs
build:ci_linux_x86_64 --config=avx_linux --config=avx_posix
build:ci_linux_x86_64 --config=mkl_open_source_only
build:ci_linux_x86_64 --config=clang --verbose_failures=true
build:ci_linux_x86_64 --color=yes
# TODO(b/356695103): We do not have a CPU-only toolchain so we use the CUDA
# toolchain for both CPU and GPU builds.
build:ci_linux_x86_64 --host_crosstool_top="@local_config_cuda//crosstool:toolchain"
build:ci_linux_x86_64 --crosstool_top="@local_config_cuda//crosstool:toolchain"
build:ci_linux_x86_64 --extra_toolchains="@local_config_cuda//crosstool:toolchain-linux-x86_64"
build:ci_linux_x86_64 --repo_env=TF_SYSROOT="/dt9"
# Clang path needs to be set for remote toolchain to be configured correctly.
build:ci_linux_x86_64 --action_env=CLANG_CUDA_COMPILER_PATH="/usr/lib/llvm-18/bin/clang"
# The toolchain in `--config=cuda` needs to be read before the toolchain in
# `--config=ci_linux_x86_64`. Otherwise, we run into issues with manylinux
# compliance.
build:ci_linux_x86_64_cuda --config=cuda --config=build_cuda_with_nvcc
build:ci_linux_x86_64_cuda --config=ci_linux_x86_64
# Linux Aarch64 CI configs
build:ci_linux_aarch64_base --config=clang --verbose_failures=true
build:ci_linux_aarch64_base --action_env=TF_SYSROOT="/dt10"
build:ci_linux_aarch64_base --color=yes
build:ci_linux_aarch64 --config=ci_linux_aarch64_base
build:ci_linux_aarch64 --host_crosstool_top="@ml2014_clang_aarch64_config_aarch64//crosstool:toolchain"
build:ci_linux_aarch64 --crosstool_top="@ml2014_clang_aarch64_config_aarch64//crosstool:toolchain"
# CUDA configs for Linux Aarch64 do not pass in the crosstool_top flag from
# above because the Aarch64 toolchain rule does not support building with NVCC.
# Instead, we use `@local_config_cuda//crosstool:toolchain` from --config=cuda
# and set `CLANG_CUDA_COMPILER_PATH` to define the toolchain so that we can
# use Clang for the C++ targets and NVCC to build CUDA targets.
build:ci_linux_aarch64_cuda --config=ci_linux_aarch64_base
build:ci_linux_aarch64_cuda --config=cuda --config=build_cuda_with_nvcc
build:ci_linux_aarch64_cuda --action_env=CLANG_CUDA_COMPILER_PATH="/usr/lib/llvm-18/bin/clang"
# Mac x86 CI configs
build:ci_darwin_x86_64 --macos_minimum_os=10.14
build:ci_darwin_x86_64 --config=macos_cache_push
build:ci_darwin_x86_64 --verbose_failures=true
build:ci_darwin_x86_64 --color=yes
# Mac Arm64 CI configs
build:ci_darwin_arm64 --macos_minimum_os=11.0
build:ci_darwin_arm64 --config=macos_cache_push
build:ci_darwin_arm64 --verbose_failures=true
build:ci_darwin_arm64 --color=yes
# Windows x86 CI configs
build:ci_windows_amd64 --config=avx_windows
build:ci_windows_amd64 --compiler=clang-cl --config=clang --verbose_failures=true
build:ci_windows_amd64 --crosstool_top="@xla//tools/toolchains/win/20240424:toolchain"
build:ci_windows_amd64 --extra_toolchains="@xla//tools/toolchains/win/20240424:cc-toolchain-x64_windows-clang-cl"
build:ci_windows_amd64 --host_linkopt=/FORCE:MULTIPLE --linkopt=/FORCE:MULTIPLE
build:ci_windows_amd64 --color=yes
# #############################################################################
# RBE config options below. These inherit the CI configs above and set the
# remote execution backend and authentication options required to run builds
# with RBE. Linux x86 and Windows builds use RBE.
# #############################################################################
# Flag to enable remote config
common --experimental_repo_remote_exec
# Allow creation of resultstore URLs for any bazel invocation
build:resultstore --google_default_credentials
build:resultstore --bes_backend=buildeventservice.googleapis.com
build:resultstore --bes_instance_name="tensorflow-testing"
build:resultstore --bes_results_url="https://source.cloud.google.com/results/invocations"
build:resultstore --bes_timeout=600s
build:rbe --config=resultstore
build:rbe --repo_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1
build:rbe --define=EXECUTOR=remote
build:rbe --flaky_test_attempts=3
build:rbe --jobs=200
build:rbe --remote_executor=grpcs://remotebuildexecution.googleapis.com
build:rbe --remote_timeout=3600
build:rbe --spawn_strategy=remote,worker,standalone,local
# Attempt to minimize the amount of data transfer between bazel and the remote
# workers:
build:rbe --remote_download_toplevel
test:rbe --test_env=USER=anon
# RBE configs for Linux x86
# Set the remote worker pool
common:rbe_linux_x86_64_base --remote_instance_name=projects/tensorflow-testing/instances/default_instance
build:rbe_linux_x86_64_base --config=rbe
build:rbe_linux_x86_64_base --action_env=PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/go/bin"
build:rbe_linux_x86_64_base --linkopt=-lrt
build:rbe_linux_x86_64_base --host_linkopt=-lrt
build:rbe_linux_x86_64_base --linkopt=-lm
build:rbe_linux_x86_64_base --host_linkopt=-lm
# Set the host, execution, and target platform
build:rbe_linux_x86_64_base --host_platform="@ubuntu20.04-clang_manylinux2014-cuda12.3-cudnn9.1_config_platform//:platform"
build:rbe_linux_x86_64_base --extra_execution_platforms="@ubuntu20.04-clang_manylinux2014-cuda12.3-cudnn9.1_config_platform//:platform"
build:rbe_linux_x86_64_base --platforms="@ubuntu20.04-clang_manylinux2014-cuda12.3-cudnn9.1_config_platform//:platform"
build:rbe_linux_x86_64 --config=rbe_linux_x86_64_base
build:rbe_linux_x86_64 --config=ci_linux_x86_64
build:rbe_linux_x86_64_cuda --config=rbe_linux_x86_64_base
build:rbe_linux_x86_64_cuda --config=ci_linux_x86_64_cuda
build:rbe_linux_x86_64_cuda --repo_env=REMOTE_GPU_TESTING=1
# RBE configs for Windows
# Set the remote worker pool
common:rbe_windows_amd64 --remote_instance_name=projects/tensorflow-testing/instances/windows
build:rbe_windows_amd64 --config=rbe
# Set the host, execution, and target platform
build:rbe_windows_amd64 --host_platform="@xla//tools/toolchains/win:x64_windows-clang-cl"
build:rbe_windows_amd64 --extra_execution_platforms="@xla//tools/toolchains/win:x64_windows-clang-cl"
build:rbe_windows_amd64 --platforms="@xla//tools/toolchains/win:x64_windows-clang-cl"
build:rbe_windows_amd64 --shell_executable=C:\\tools\\msys64\\usr\\bin\\bash.exe
build:rbe_windows_amd64 --enable_runfiles
build:rbe_windows_amd64 --define=override_eigen_strong_inline=true
# Don't build the python zip archive in the RBE build.
build:rbe_windows_amd64 --nobuild_python_zip
build:rbe_windows_amd64 --config=ci_windows_amd64
# #############################################################################
# Cross-compile config options below. Native RBE support does not exist for
# Linux Aarch64 and Mac x86. So, we use a cross-compile toolchain to build
# targets for Linux Aarch64 and Mac x86 on the Linux x86 RBE pool.
# #############################################################################
# Set execution platform to Linux x86
# Note: Many of the "host_" flags such as "host_cpu" and "host_crosstool_top"
# are actually used to specify the execution platform details. This appears to
# be because these flags are old and predate the distinction between the host
# and execution platforms.
build:cross_compile_base --host_cpu=k8
build:cross_compile_base --host_crosstool_top=@xla//tools/toolchains/cross_compile/cc:cross_compile_toolchain_suite
build:cross_compile_base --extra_execution_platforms=@xla//tools/toolchains/cross_compile/config:linux_x86_64
# Linux Aarch64
build:cross_compile_linux_aarch64 --config=cross_compile_base
# Set the target CPU to Aarch64
build:cross_compile_linux_aarch64 --platforms=@xla//tools/toolchains/cross_compile/config:linux_aarch64
build:cross_compile_linux_aarch64 --cpu=aarch64
build:cross_compile_linux_aarch64 --crosstool_top=@xla//tools/toolchains/cross_compile/cc:cross_compile_toolchain_suite
build:rbe_cross_compile_base --config=rbe
build:rbe_cross_compile_base --remote_instance_name=projects/tensorflow-testing/instances/default_instance
# RBE cross-compile configs for Linux Aarch64
build:rbe_cross_compile_linux_aarch64 --config=cross_compile_linux_aarch64
build:rbe_cross_compile_linux_aarch64 --config=rbe_cross_compile_base
# Mac x86
build:cross_compile_darwin_x86_64 --config=cross_compile_base
build:cross_compile_darwin_x86_64 --config=nonccl
# Target Catalina (10.15) as the minimum supported OS
build:cross_compile_darwin_x86_64 --action_env MACOSX_DEPLOYMENT_TARGET=10.15
# Set the target CPU to Darwin x86
build:cross_compile_darwin_x86_64 --platforms=@xla//tools/toolchains/cross_compile/config:darwin_x86_64
build:cross_compile_darwin_x86_64 --cpu=darwin
build:cross_compile_darwin_x86_64 --crosstool_top=@xla//tools/toolchains/cross_compile/cc:cross_compile_toolchain_suite
# When RBE cross-compiling for macOS, we need to explicitly register the
# toolchain. Otherwise, oddly, RBE complains that a "docker container must be
# specified".
build:cross_compile_darwin_x86_64 --extra_toolchains=@xla//tools/toolchains/cross_compile/config:macos-x86-cross-compile-cc-toolchain
# Map --platforms=darwin_x86_64 to --cpu=darwin and vice-versa to make selects()
# and transitions that use these flags work. The flag --platform_mappings needs
# to be set to a file that exists relative to the package path roots.
build:cross_compile_darwin_x86_64 --platform_mappings=platform_mappings
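# As a hedged sketch only (the authoritative mappings live in the
# `platform_mappings` file at the workspace root, which may differ), such a
# mapping could look like:
#   platforms:
#     @xla//tools/toolchains/cross_compile/config:darwin_x86_64
#       --cpu=darwin
#   flags:
#     --cpu=darwin
#       @xla//tools/toolchains/cross_compile/config:darwin_x86_64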
# RBE cross-compile configs for Darwin x86
build:rbe_cross_compile_darwin_x86_64 --config=cross_compile_darwin_x86_64
build:rbe_cross_compile_darwin_x86_64 --config=rbe_cross_compile_base
#############################################################################
# Some configs for getting various forms of debug builds. In general, the
# codebase is only regularly built with optimizations. Use 'debug_symbols' to
# just get symbols for the parts of XLA/PJRT that jaxlib uses.
# Or try 'debug' to get a build with assertions enabled and minimal
# optimizations.
# Include these in a local .bazelrc.user file as:
# build --config=debug_symbols
# Or:
# build --config=debug
#
# Additional files can be opted in for debug symbols by adding patterns
# to --per_file_copt, similar to the one below.
#############################################################################
build:debug_symbols --strip=never --per_file_copt="xla/pjrt|xla/python@-g3"
build:debug --config debug_symbols -c fastbuild
# Load `.jax_configure.bazelrc` file written by build.py
try-import %workspace%/.jax_configure.bazelrc
# Load rc file with user-specific options.
try-import %workspace%/.bazelrc.user
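# As an illustration (this file is not checked in), a .bazelrc.user might
# contain entries such as:
#   build --config=debug_symbols
#   build --config=avx_posix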