From 2eadd952cf555d8b3bfd2cf104e0560e4c96b7e5 Mon Sep 17 00:00:00 2001
From: "Mr.Nineteen"
Date: Wed, 26 May 2021 10:35:40 +0800
Subject: [PATCH] Support TF Serving compilation

---
 README.md                                   | 59 ++--------------
 build_deps/tf_dependency/build_defs.bzl.tpl |  2 +
 build_deps/tf_dependency/tf_configure.bzl   |  5 ++
 configure.py                                |  2 +
 .../dynamic_embedding/core/BUILD            |  9 ++-
 .../tensorflow_recommenders_addons.bzl      | 68 ++++++++++++++-----
 6 files changed, 72 insertions(+), 73 deletions(-)

diff --git a/README.md b/README.md
index 6edbbaa97..d42de51c6 100644
--- a/README.md
+++ b/README.md
@@ -188,66 +188,15 @@ sess_config.gpu_options.allow_growth = True
 | 0.2.0 | 2.4.1 | 2.4.0 | GCC 7.3.1 | 11.0 | 8.0 | 3.5, 5.2, 6.0, 6.1, 7.0, 7.5, 8.0 |
 | 0.1.0 | 2.4.1 | 2.4.0 | GCC 7.3.1 | - | - | - |
 
-#### CPU Serving TensorFlow models with custom ops
-Reference documents: https://www.tensorflow.org/tfx/serving/custom_op
+**NOTICE**: Reference documents: https://www.tensorflow.org/tfx/serving/custom_op
 
-TFRA modification([`tensorflow_recommenders_addons.bzl`](tensorflow_recommenders_addons/tensorflow_recommenders_addons.bzl)):
+#### CPU or GPU Serving TensorFlow models with custom ops
+Before compiling, set the environment variable:
 ```
-deps = deps + [
-        # "@local_config_tf//:libtensorflow_framework",
-        "@local_config_tf//:tf_header_lib",
-    ]
-
-native.cc_library(
-        name = name,
-        srcs = srcs,
-        copts = copts,
-        alwayslink = 1,
-        features = select({
-            "//tensorflow_recommenders_addons:windows": ["windows_export_all_symbols"],
-            "//conditions:default": [],
-        }),
-        deps = deps,
-        **kwargs
-    )
+export FOR_TF_SERVING="1"
 ```
 Tensorflow Serving modification(**model_servers/BUILD**):
 ```
-SUPPORTED_TENSORFLOW_OPS = if_v2([]) + if_not_v2([
-    "@org_tensorflow//tensorflow/contrib:contrib_kernels",
-    "@org_tensorflow//tensorflow/contrib:contrib_ops_op_lib",
-]) + [
-    "@org_tensorflow_text//tensorflow_text:ops_lib",
-    "//tensorflow_recommenders_addons/dynamic_embedding/core:_cuckoo_hashtable_ops.so",
-]
-```
-#### GPU Serving TensorFlow models with custom ops
-Including CPU Serving modification, TFRA modification([`tensorflow_recommenders_addons.bzl`](tensorflow_recommenders_addons/tensorflow_recommenders_addons.bzl)) again:
-```
-if cuda_srcs:
-    copts = copts + if_cuda(["-DGOOGLE_CUDA=1"])
-    cuda_copts = copts + if_cuda_is_configured([
-        "-x cuda",
-        "-nvcc_options=relaxed-constexpr",
-        "-nvcc_options=ftz=true",
-    ])
-    cuda_deps = deps + if_cuda_is_configured(["//tensorflow_recommenders_addons/dynamic_embedding/core/lib/nvhash:nvhashtable"]) + if_cuda_is_configured([
-        "@local_config_cuda//cuda:cuda_headers",
-        "@local_config_cuda//cuda:cudart_static",
-    ])
-    basename = name.split(".")[0]
-    native.cc_library(
-        name = basename + "_gpu",
-        srcs = cuda_srcs,
-        deps = cuda_deps,
-        copts = cuda_copts,
-        alwayslink = 1,
-        **kwargs
-    )
-    deps = deps + if_cuda_is_configured([":" + basename + "_gpu"])
-```
-Tensorflow Serving modification(**model_servers/BUILD**) again:
-```
 SUPPORTED_TENSORFLOW_OPS = if_v2([]) + if_not_v2([
     "@org_tensorflow//tensorflow/contrib:contrib_kernels",
     "@org_tensorflow//tensorflow/contrib:contrib_ops_op_lib",
diff --git a/build_deps/tf_dependency/build_defs.bzl.tpl b/build_deps/tf_dependency/build_defs.bzl.tpl
index 6d5c2e297..b0a4b4848 100644
--- a/build_deps/tf_dependency/build_defs.bzl.tpl
+++ b/build_deps/tf_dependency/build_defs.bzl.tpl
@@ -3,3 +3,5 @@
 D_GLIBCXX_USE_CXX11_ABI = "%{tf_cx11_abi}"
 
 DTF_VERSION = "%{tf_version}"
+
+FOR_TF_SERVING = "%{for_tf_serving}"
diff --git a/build_deps/tf_dependency/tf_configure.bzl b/build_deps/tf_dependency/tf_configure.bzl
index bf660f897..ac50e39a3 100644
--- a/build_deps/tf_dependency/tf_configure.bzl
+++ b/build_deps/tf_dependency/tf_configure.bzl
@@ -8,6 +8,8 @@ _TF_SHARED_LIBRARY_NAME = "TF_SHARED_LIBRARY_NAME"
 
 _TF_CXX11_ABI_FLAG = "TF_CXX11_ABI_FLAG"
 
+_FOR_TF_SERVING = "FOR_TF_SERVING"
+
 TF_VERSION = "TF_VERSION"
 
 def _tpl(repository_ctx, tpl, substitutions = {}, out = None):
@@ -207,6 +209,7 @@ def _tf_pip_impl(repository_ctx):
     tf_shared_library_path = "%s/%s" % (tf_shared_library_dir, tf_shared_library_name)
     tf_cx11_abi = "-D_GLIBCXX_USE_CXX11_ABI=%s" % (repository_ctx.os.environ[_TF_CXX11_ABI_FLAG])
     tf_version = "-DTF_VERSION=%s" % (repository_ctx.os.environ[TF_VERSION])
+    for_tf_serving = repository_ctx.os.environ[_FOR_TF_SERVING]
 
     tf_shared_library_rule = _symlink_genrule_for_dir(
         repository_ctx,
@@ -229,6 +232,7 @@ def _tf_pip_impl(repository_ctx):
         {
             "%{tf_cx11_abi}": tf_cx11_abi,
             "%{tf_version}": tf_version,
+            "%{for_tf_serving}": for_tf_serving,
         },
     )
 
@@ -238,6 +242,7 @@ tf_configure = repository_rule(
         _TF_SHARED_LIBRARY_DIR,
         _TF_SHARED_LIBRARY_NAME,
         _TF_CXX11_ABI_FLAG,
+        _FOR_TF_SERVING,
     ],
     implementation = _tf_pip_impl,
 )
diff --git a/configure.py b/configure.py
index 18a93f71a..aa7ae92bc 100644
--- a/configure.py
+++ b/configure.py
@@ -150,6 +150,8 @@ def create_build_configuration():
   # TODO(Lifann) write them to enviroment variables.
   write_action_env("TF_VERSION", tf_version)
 
+  write_action_env("FOR_TF_SERVING", os.getenv("FOR_TF_SERVING", "0"))
+
   write("build --spawn_strategy=standalone")
   write("build --strategy=Genrule=standalone")
   write("build -c opt")
diff --git a/tensorflow_recommenders_addons/dynamic_embedding/core/BUILD b/tensorflow_recommenders_addons/dynamic_embedding/core/BUILD
index 965e5b313..847b6a5cf 100644
--- a/tensorflow_recommenders_addons/dynamic_embedding/core/BUILD
+++ b/tensorflow_recommenders_addons/dynamic_embedding/core/BUILD
@@ -4,8 +4,9 @@ load("@local_config_cuda//cuda:build_defs.bzl", "if_cuda", "if_cuda_is_configure
 
 package(default_visibility = ["//visibility:public"])
 
-load("//tensorflow_recommenders_addons:tensorflow_recommenders_addons.bzl", "custom_op_library")
+load("//tensorflow_recommenders_addons:tensorflow_recommenders_addons.bzl", "custom_op_library", "if_cuda_for_tf_serving")
 load("@local_config_cuda//cuda:build_defs.bzl", "if_cuda")
+load("@local_config_tf//:build_defs.bzl", "FOR_TF_SERVING")
 
 custom_op_library(
     name = "_cuckoo_hashtable_ops.so",
@@ -14,7 +15,11 @@ custom_op_library(
         "kernels/cuckoo_hashtable_op.cc",
         "ops/cuckoo_hashtable_ops.cc",
     ] + glob(["kernels/lookup_impl/lookup_table_op_cpu*"]),
-    cuda_deps = if_cuda(["//tensorflow_recommenders_addons/dynamic_embedding/core/lib/nvhash:nvhashtable"]),
+    cuda_deps = if_cuda_for_tf_serving(
+        ["//tensorflow_recommenders_addons/dynamic_embedding/core/lib/nvhash:nvhashtable"],
+        [],
+        FOR_TF_SERVING,
+    ),
     cuda_srcs = if_cuda([
         "kernels/cuckoo_hashtable_op_gpu.h",
         "kernels/cuckoo_hashtable_op_gpu.cu.cc",
diff --git a/tensorflow_recommenders_addons/tensorflow_recommenders_addons.bzl b/tensorflow_recommenders_addons/tensorflow_recommenders_addons.bzl
index 0ebfd5e0d..b63a79af8 100644
--- a/tensorflow_recommenders_addons/tensorflow_recommenders_addons.bzl
+++ b/tensorflow_recommenders_addons/tensorflow_recommenders_addons.bzl
@@ -2,6 +2,7 @@ load(
     "@local_config_tf//:build_defs.bzl",
     "DTF_VERSION",
     "D_GLIBCXX_USE_CXX11_ABI",
+    "FOR_TF_SERVING",
 )
 load(
     "@local_config_cuda//cuda:build_defs.bzl",
@@ -17,10 +18,15 @@ def custom_op_library(
         cuda_deps = [],
         copts = [],
         **kwargs):
-    deps = deps + [
-        "@local_config_tf//:libtensorflow_framework",
-        "@local_config_tf//:tf_header_lib",
-    ]
+    if FOR_TF_SERVING == "1":
+        deps = deps + [
+            "@local_config_tf//:tf_header_lib",
+        ]
+    else:
+        deps = deps + [
+            "@local_config_tf//:libtensorflow_framework",
+            "@local_config_tf//:tf_header_lib",
+        ]
 
     if cuda_srcs:
         copts = copts + if_cuda(["-DGOOGLE_CUDA=1"])
@@ -67,15 +73,45 @@ def custom_op_library(
         ],
     })
 
-    native.cc_binary(
-        name = name,
-        srcs = srcs,
-        copts = copts,
-        linkshared = 1,
-        features = select({
-            "//tensorflow_recommenders_addons:windows": ["windows_export_all_symbols"],
-            "//conditions:default": [],
-        }),
-        deps = deps,
-        **kwargs
-    )
+    if FOR_TF_SERVING == "1":
+        native.cc_library(
+            name = name,
+            srcs = srcs,
+            copts = copts,
+            alwayslink = 1,
+            features = select({
+                "//tensorflow_recommenders_addons:windows": ["windows_export_all_symbols"],
+                "//conditions:default": [],
+            }),
+            deps = deps,
+            **kwargs
+        )
+    else:
+        native.cc_binary(
+            name = name,
+            srcs = srcs,
+            copts = copts,
+            linkshared = 1,
+            features = select({
+                "//tensorflow_recommenders_addons:windows": ["windows_export_all_symbols"],
+                "//conditions:default": [],
+            }),
+            deps = deps,
+            **kwargs
+        )
+
+def if_cuda_for_tf_serving(if_true, if_false = [], for_tf_serving = "0"):
+    """Shorthand for select()'ing on whether we're building with CUDA.
+
+    Returns if_true unconditionally when building for TF Serving; otherwise
+    returns a select() that evaluates to if_true when CUDA support is enabled
+    and to if_false by default.
+    """
+    if for_tf_serving == "1":
+        return if_true
+
+    return select({
+        "@local_config_cuda//cuda:using_nvcc": if_true,
+        "@local_config_cuda//cuda:using_clang": if_true,
+        "//conditions:default": if_false,
+    })
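
For reference, a minimal build flow this patch is meant to enable. This is a sketch only, not part of the patch: it assumes the TFRA checkout has already been wired into the TF Serving Bazel workspace and that model_servers/BUILD has been edited as shown in the README hunk above; the tensorflow_model_server target name comes from upstream TF Serving, not from this change.
```
# In the TFRA checkout: ask configure.py to emit a TF Serving friendly
# .bazelrc (the new FOR_TF_SERVING action env defaults to "0").
export FOR_TF_SERVING="1"
python configure.py

# In the TF Serving checkout, with
# "//tensorflow_recommenders_addons/dynamic_embedding/core:_cuckoo_hashtable_ops.so"
# added to SUPPORTED_TENSORFLOW_OPS in model_servers/BUILD:
bazel build //tensorflow_serving/model_servers:tensorflow_model_server
```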