From cda40aee79b9930fbb8d09f46bfa4714be5cba0d Mon Sep 17 00:00:00 2001 From: chengmengli06 Date: Sun, 24 Jul 2022 16:08:13 +0800 Subject: [PATCH 1/6] add support for tf25 py3 test --- .github/workflows/ci_py3_tf25.yml | 115 ++++++++++++++++++++++++++++++ 1 file changed, 115 insertions(+) create mode 100644 .github/workflows/ci_py3_tf25.yml diff --git a/.github/workflows/ci_py3_tf25.yml b/.github/workflows/ci_py3_tf25.yml new file mode 100644 index 000000000..032c9a5ab --- /dev/null +++ b/.github/workflows/ci_py3_tf25.yml @@ -0,0 +1,115 @@ +name: CI Build PY3 +on: + pull_request: + types: [opened, reopened, synchronize] + +jobs: + ci-test: + runs-on: EasyRec-py37-tf25 + defaults: + run: + shell: bash {0} + steps: + - name: FetchCommit ${{ github.event.pull_request.head.sha }} + uses: actions/checkout@v2 + with: + ref: ${{ github.event.pull_request.head.sha }} + submodules: recursive + - name: RunCiTest + id: run_ci_test + env: + TEST_DEVICES: "" + PULL_REQUEST_NUM: ${{ github.event.pull_request.number }} + run: | + source activate tf25_py3 + python git-lfs/git_lfs.py pull + source scripts/ci_test.sh + - name: LabelAndComment + env: + CI_TEST_PASSED: ${{steps.run_ci_test.outputs.ci_test_passed}} + uses: actions/github-script@v5 + with: + script: | + const { CI_TEST_PASSED } = process.env + labels = await github.rest.issues.listLabelsOnIssue({ + issue_number: context.issue.number, + repo:context.repo.repo, + owner:context.repo.owner + }) + console.log('labels.url=' + labels.url) + + labels = labels.data + + var label_names = [] + if (labels != null) { + labels.forEach(tmp_lbl => label_names.push(tmp_lbl.name)) + } + console.log(`ci_test_passed=${CI_TEST_PASSED} labels=${label_names}`); + + var pass_label = null; + if (labels != null) { + pass_label = labels.find(label=>label.name=='ci_py3_tf25_test_passed'); + } + + var fail_label = null; + if (labels != null) { + fail_label = labels.find(label=>label.name=='ci_py3_tf25_test_failed'); + } + + if (pass_label) { + github.rest.issues.removeLabel({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + name: 'ci_py3_tf25_test_passed' + }) + } + + if (fail_label) { + github.rest.issues.removeLabel({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + name: 'ci_py3_tf25_test_failed' + }) + } + + if (CI_TEST_PASSED == 1) { + github.rest.issues.addLabels({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + labels: ['ci_py3_tf25_test_passed'] + }) + + github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.issue.number, + body: "CI PY3 TF25 Test Passed" + }) + } else { + github.rest.issues.addLabels({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + labels: ['ci_py3_tf25_test_failed'] + }) + + github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.issue.number, + body: "CI PY3 TF25 Test Failed" + }) + } + - name: SignalFail + env: + CI_TEST_PASSED: ${{steps.run_ci_test.outputs.ci_test_passed}} + run: | + echo "CI_TEST_PASSED=${CI_TEST_PASSED}" + if [ $CI_TEST_PASSED -ne 1 ] + then + echo "ci_py3_tf25_test_failed, will exit" + exit 1 + fi From dce6a5a677e9428b90f090e24346d42092a0f2fd Mon Sep 17 00:00:00 2001 From: chengmengli06 Date: Sun, 24 Jul 2022 16:53:23 +0800 Subject: [PATCH 2/6] add support for tf25 py3 test --- .github/workflows/ci_py3_tf25.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci_py3_tf25.yml b/.github/workflows/ci_py3_tf25.yml index 032c9a5ab..b1581bba2 100644 --- a/.github/workflows/ci_py3_tf25.yml +++ b/.github/workflows/ci_py3_tf25.yml @@ -1,4 +1,4 @@ -name: CI Build PY3 +name: CI Build PY3 TF25 on: pull_request: types: [opened, reopened, synchronize] From 8817a5437e8eae4d60182547473c619893ce7569 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=A4=A9=E9=82=91?= Date: Mon, 25 Jul 2022 11:47:55 +0800 Subject: [PATCH 3/6] fix feature_selection tf25 compat --- easy_rec/python/tools/feature_selection.py | 3 +++ setup.cfg | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/easy_rec/python/tools/feature_selection.py b/easy_rec/python/tools/feature_selection.py index 3f7c8969b..295698013 100644 --- a/easy_rec/python/tools/feature_selection.py +++ b/easy_rec/python/tools/feature_selection.py @@ -12,6 +12,9 @@ from easy_rec.python.utils import config_util +if tf.__version__ >= '2.0': + tf = tf.compat.v1 + import matplotlib # NOQA matplotlib.use('Agg') # NOQA import matplotlib.pyplot as plt # NOQA diff --git a/setup.cfg b/setup.cfg index 00e26bc28..e6d30fc7d 100644 --- a/setup.cfg +++ b/setup.cfg @@ -10,7 +10,7 @@ multi_line_output = 7 force_single_line = true known_standard_library = setuptools known_first_party = easy_rec -known_third_party = absl,common_io,future,google,graphlearn,matplotlib,nni,numpy,odps,oss2,pai,pandas,psutil,six,sklearn,sphinx_markdown_tables,sphinx_rtd_theme,tensorflow,yaml +known_third_party = absl,common_io,distutils,future,google,graphlearn,matplotlib,nni,numpy,odps,oss2,pai,pandas,psutil,six,sklearn,sphinx_markdown_tables,sphinx_rtd_theme,tensorflow,yaml no_lines_before = LOCALFOLDER default_section = THIRDPARTY skip = easy_rec/python/protos From fc8e0fe74c065e45e6509325c461b89a401a5bde Mon Sep 17 00:00:00 2001 From: chengmengli06 Date: Mon, 25 Jul 2022 15:58:03 +0800 Subject: [PATCH 4/6] skip multi worker mirror test in high versions as there are bugs in collective_ops all_reduce --- easy_rec/python/test/train_eval_test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/easy_rec/python/test/train_eval_test.py b/easy_rec/python/test/train_eval_test.py index b06622853..3b75a3478 100644 --- a/easy_rec/python/test/train_eval_test.py +++ b/easy_rec/python/test/train_eval_test.py @@ -634,8 +634,8 @@ def test_dssm_sample_weight(self): self.assertTrue(self._success) @unittest.skipIf( - LooseVersion(tf.__version__) < LooseVersion('2.3.0'), - 'MultiWorkerMirroredStrategy need tf version > 2.3') + LooseVersion(tf.__version__) != LooseVersion('2.3.0'), + 'MultiWorkerMirroredStrategy need tf version == 2.3') def test_train_with_multi_worker_mirror(self): self._success = test_utils.test_distributed_train_eval( 'samples/model_config/multi_tower_multi_worker_mirrored_strategy_on_taobao.config', From 5bde4eb84cdc68aa70122506cd37b964df58dd7d Mon Sep 17 00:00:00 2001 From: weisu Date: Mon, 25 Jul 2022 20:24:08 +0800 Subject: [PATCH 5/6] [bugfix]: modify bug of tf25. add compact layer_norm layer. --- docs/source/feature/rtp_fg.md | 2 +- easy_rec/python/compat/layers.py | 330 ++++++++++++++++++ .../layers/multihead_cross_attention.py | 3 +- .../loss/softmax_loss_with_negative_mining.py | 9 +- easy_rec/python/test/loss_test.py | 5 +- requirements/docs.txt | 2 +- setup.cfg | 2 +- 7 files changed, 344 insertions(+), 9 deletions(-) create mode 100644 easy_rec/python/compat/layers.py diff --git a/docs/source/feature/rtp_fg.md b/docs/source/feature/rtp_fg.md index 9d76cd3d9..3b33f2b65 100644 --- a/docs/source/feature/rtp_fg.md +++ b/docs/source/feature/rtp_fg.md @@ -647,7 +647,7 @@ message PBResponse { map context_features = 4; string error_msg = 5; - + StatusCode status_code = 6; // item ids diff --git a/easy_rec/python/compat/layers.py b/easy_rec/python/compat/layers.py new file mode 100644 index 000000000..65bf25293 --- /dev/null +++ b/easy_rec/python/compat/layers.py @@ -0,0 +1,330 @@ +# -*- coding: utf-8 -*- +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Higher level ops for building layers.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools + +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.ops import init_ops +from tensorflow.python.ops import nn +from tensorflow.python.ops import variable_scope + + +def layer_norm(inputs, + center=True, + scale=True, + activation_fn=None, + reuse=None, + variables_collections=None, + outputs_collections=None, + trainable=True, + begin_norm_axis=1, + begin_params_axis=-1, + scope=None): + """Adds a Layer Normalization layer. + + Based on the paper: + + "Layer Normalization" + Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton + https://arxiv.org/abs/1607.06450. + + Can be used as a normalizer function for conv2d and fully_connected. + + Given a tensor `inputs` of rank `R`, moments are calculated and normalization + is performed over axes `begin_norm_axis ... R - 1`. Scaling and centering, + if requested, is performed over axes `begin_params_axis .. R - 1`. + + By default, `begin_norm_axis = 1` and `begin_params_axis = -1`, + meaning that normalization is performed over all but the first axis + (the `HWC` if `inputs` is `NHWC`), while the `beta` and `gamma` trainable + parameters are calculated for the rightmost axis (the `C` if `inputs` is + `NHWC`). Scaling and recentering is performed via broadcast of the + `beta` and `gamma` parameters with the normalized tensor. + + The shapes of `beta` and `gamma` are `inputs.shape[begin_params_axis:]`, + and this part of the inputs' shape must be fully defined. + + Args: + inputs: A tensor having rank `R`. The normalization is performed over + axes `begin_norm_axis ... R - 1` and centering and scaling parameters + are calculated over `begin_params_axis ... R - 1`. + center: If True, add offset of `beta` to normalized tensor. If False, `beta` + is ignored. + scale: If True, multiply by `gamma`. If False, `gamma` is + not used. When the next layer is linear (also e.g. `nn.relu`), this can be + disabled since the scaling can be done by the next layer. + activation_fn: Activation function, default set to None to skip it and + maintain a linear activation. + reuse: Whether or not the layer and its variables should be reused. To be + able to reuse the layer scope must be given. + variables_collections: Optional collections for the variables. + outputs_collections: Collections to add the outputs. + trainable: If `True` also add variables to the graph collection + `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable). + begin_norm_axis: The first normalization dimension: normalization will be + performed along dimensions `begin_norm_axis : rank(inputs)` + begin_params_axis: The first parameter (beta, gamma) dimension: scale + and centering parameters will have dimensions + `begin_params_axis : rank(inputs)` and will be broadcast with the + normalized inputs accordingly. + scope: Optional scope for `variable_scope`. + + Returns: + A `Tensor` representing the output of the operation, having the same + shape and dtype as `inputs`. + + Raises: + ValueError: If the rank of `inputs` is not known at graph build time, + or if `inputs.shape[begin_params_axis:]` is not fully defined at + graph build time. + """ + with variable_scope.variable_scope( + scope, 'LayerNorm', [inputs], reuse=reuse) as sc: + inputs = ops.convert_to_tensor(inputs) + inputs_shape = inputs.shape + inputs_rank = inputs_shape.ndims + if inputs_rank is None: + raise ValueError('Inputs %s has undefined rank.' % inputs.name) + dtype = inputs.dtype.base_dtype + if begin_norm_axis < 0: + begin_norm_axis = inputs_rank + begin_norm_axis + if begin_params_axis >= inputs_rank or begin_norm_axis >= inputs_rank: + raise ValueError('begin_params_axis (%d) and begin_norm_axis (%d) ' + 'must be < rank(inputs) (%d)' % + (begin_params_axis, begin_norm_axis, inputs_rank)) + params_shape = inputs_shape[begin_params_axis:] + if not params_shape.is_fully_defined(): + raise ValueError( + 'Inputs %s: shape(inputs)[%s:] is not fully defined: %s' % + (inputs.name, begin_params_axis, inputs_shape)) + # Allocate parameters for the beta and gamma of the normalization. + beta, gamma = None, None + if center: + beta_collections = get_variable_collections(variables_collections, 'beta') + beta = model_variable( + 'beta', + shape=params_shape, + dtype=dtype, + initializer=init_ops.zeros_initializer(), + collections=beta_collections, + trainable=trainable) + if scale: + gamma_collections = get_variable_collections(variables_collections, + 'gamma') + from tensorflow.contrib.framework.python.ops import variables + gamma = variables.model_variable( + 'gamma', + shape=params_shape, + dtype=dtype, + initializer=init_ops.ones_initializer(), + collections=gamma_collections, + trainable=trainable) + # Calculate the moments on the last axis (layer activations). + norm_axes = list(range(begin_norm_axis, inputs_rank)) + mean, variance = nn.moments(inputs, norm_axes, keep_dims=True) + # Compute layer normalization using the batch_normalization function. + variance_epsilon = 1e-12 + outputs = nn.batch_normalization( + inputs, + mean, + variance, + offset=beta, + scale=gamma, + variance_epsilon=variance_epsilon) + outputs.set_shape(inputs_shape) + if activation_fn is not None: + outputs = activation_fn(outputs) + return collect_named_outputs(outputs_collections, sc.name, outputs) + + +def get_variable_collections(variables_collections, name): + if isinstance(variables_collections, dict): + variable_collections = variables_collections.get(name, None) + else: + variable_collections = variables_collections + return variable_collections + + +def collect_named_outputs(collections, alias, outputs): + """Add `Tensor` outputs tagged with alias to collections. + + It is useful to collect end-points or tags for summaries. Example of usage: + logits = collect_named_outputs('end_points', 'inception_v3/logits', logits) + assert 'inception_v3/logits' in logits.aliases + + Args: + collections: A collection or list of collections. If None skip collection. + alias: String to append to the list of aliases of outputs, for example, + 'inception_v3/conv1'. + outputs: Tensor, an output tensor to collect + + Returns: + The outputs Tensor to allow inline call. + """ + if collections: + append_tensor_alias(outputs, alias) + ops.add_to_collections(collections, outputs) + return outputs + + +def append_tensor_alias(tensor, alias): + """Append an alias to the list of aliases of the tensor. + + Args: + tensor: A `Tensor`. + alias: String, to add to the list of aliases of the tensor. + + Returns: + The tensor with a new alias appended to its list of aliases. + """ + # Remove ending '/' if present. + if alias[-1] == '/': + alias = alias[:-1] + if hasattr(tensor, 'aliases'): + tensor.aliases.append(alias) + else: + tensor.aliases = [alias] + return tensor + + +def variable(name, + shape=None, + dtype=None, + initializer=None, + regularizer=None, + trainable=True, + collections=None, + caching_device=None, + device=None, + partitioner=None, + custom_getter=None, + use_resource=None): + """Gets an existing variable with these parameters or creates a new one. + + Args: + name: the name of the new or existing variable. + shape: shape of the new or existing variable. + dtype: type of the new or existing variable (defaults to `DT_FLOAT`). + initializer: initializer for the variable if one is created. + regularizer: a (Tensor -> Tensor or None) function; the result of + applying it on a newly created variable will be added to the collection + GraphKeys.REGULARIZATION_LOSSES and can be used for regularization. + trainable: If `True` also add the variable to the graph collection + `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). + collections: A list of collection names to which the Variable will be added. + If None it would default to `tf.GraphKeys.GLOBAL_VARIABLES`. + caching_device: Optional device string or function describing where the + Variable should be cached for reading. Defaults to the Variable's + device. + device: Optional device to place the variable. It can be an string or a + function that is called to get the device for the variable. + partitioner: Optional callable that accepts a fully defined `TensorShape` + and dtype of the `Variable` to be created, and returns a list of + partitions for each axis (currently only one axis can be partitioned). + custom_getter: Callable that allows overwriting the internal + get_variable method and has to have the same signature. + use_resource: If `True` use a ResourceVariable instead of a Variable. + + Returns: + The created or existing variable. + """ + collections = list(collections if collections is not None else + [ops.GraphKeys.GLOBAL_VARIABLES]) + + # Remove duplicates + collections = list(set(collections)) + getter = variable_scope.get_variable + if custom_getter is not None: + getter = functools.partial( + custom_getter, reuse=variable_scope.get_variable_scope().reuse) + with ops.device(device or ''): + return getter( + name, + shape=shape, + dtype=dtype, + initializer=initializer, + regularizer=regularizer, + trainable=trainable, + collections=collections, + caching_device=caching_device, + partitioner=partitioner, + use_resource=use_resource) + + +def model_variable(name, + shape=None, + dtype=dtypes.float32, + initializer=None, + regularizer=None, + trainable=True, + collections=None, + caching_device=None, + device=None, + partitioner=None, + custom_getter=None, + use_resource=None): + """Gets an existing model variable with these parameters or creates a new one. + + Args: + name: the name of the new or existing variable. + shape: shape of the new or existing variable. + dtype: type of the new or existing variable (defaults to `DT_FLOAT`). + initializer: initializer for the variable if one is created. + regularizer: a (Tensor -> Tensor or None) function; the result of + applying it on a newly created variable will be added to the collection + GraphKeys.REGULARIZATION_LOSSES and can be used for regularization. + trainable: If `True` also add the variable to the graph collection + `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). + collections: A list of collection names to which the Variable will be added. + Note that the variable is always also added to the + `GraphKeys.GLOBAL_VARIABLES` and `GraphKeys.MODEL_VARIABLES` collections. + caching_device: Optional device string or function describing where the + Variable should be cached for reading. Defaults to the Variable's + device. + device: Optional device to place the variable. It can be an string or a + function that is called to get the device for the variable. + partitioner: Optional callable that accepts a fully defined `TensorShape` + and dtype of the `Variable` to be created, and returns a list of + partitions for each axis (currently only one axis can be partitioned). + custom_getter: Callable that allows overwriting the internal + get_variable method and has to have the same signature. + use_resource: If `True` use a ResourceVariable instead of a Variable. + + Returns: + The created or existing variable. + """ + collections = list(collections or []) + collections += [ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.MODEL_VARIABLES] + var = variable( + name, + shape=shape, + dtype=dtype, + initializer=initializer, + regularizer=regularizer, + trainable=trainable, + collections=collections, + caching_device=caching_device, + device=device, + partitioner=partitioner, + custom_getter=custom_getter, + use_resource=use_resource) + return var diff --git a/easy_rec/python/layers/multihead_cross_attention.py b/easy_rec/python/layers/multihead_cross_attention.py index 6916d9f9b..d5d49bb68 100644 --- a/easy_rec/python/layers/multihead_cross_attention.py +++ b/easy_rec/python/layers/multihead_cross_attention.py @@ -4,6 +4,7 @@ import tensorflow as tf +from easy_rec.python.compat.layers import layer_norm as tf_layer_norm from easy_rec.python.layers.common_layers import gelu from easy_rec.python.utils.shape_utils import get_shape_list @@ -561,7 +562,7 @@ def cross_attention_tower(left_tensor, def layer_norm(input_tensor, name=None): """Run layer normalization on the last dimension of the tensor.""" - return tf.contrib.layers.layer_norm( + return tf_layer_norm( inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name) diff --git a/easy_rec/python/loss/softmax_loss_with_negative_mining.py b/easy_rec/python/loss/softmax_loss_with_negative_mining.py index fe1b267a6..99f92d4af 100644 --- a/easy_rec/python/loss/softmax_loss_with_negative_mining.py +++ b/easy_rec/python/loss/softmax_loss_with_negative_mining.py @@ -38,7 +38,8 @@ def softmax_loss_with_negative_mining(user_emb, weights=1.0, gamma=1.0, margin=0, - t=1): + t=1, + seed=None): """Compute the softmax loss based on the cosine distance explained below. Given mini batches for `user_emb` and `item_emb`, this function computes for each element in `user_emb` @@ -60,6 +61,10 @@ def softmax_loss_with_negative_mining(user_emb, gamma: smooth coefficient of softmax margin: the margin between positive pair and negative pair t: coefficient of support vector guided softmax loss + seed: A Python integer. Used to create a random seed for the distribution. + See `tf.set_random_seed` + for behavior. + Return: support vector guided softmax loss of positive labels """ @@ -77,7 +82,7 @@ def softmax_loss_with_negative_mining(user_emb, vectors = [item_emb] for i in range(num_negative_samples): - shift = tf.random_uniform([], 1, batch_size, dtype=tf.int32) + shift = tf.random_uniform([], 1, batch_size, dtype=tf.int32, seed=seed) neg_item_emb = tf.roll(item_emb, shift, axis=0) vectors.append(neg_item_emb) # all_embeddings's shape: (batch_size, num_negative_samples + 1, vec_dim) diff --git a/easy_rec/python/test/loss_test.py b/easy_rec/python/test/loss_test.py index ca4762535..f78b74ce6 100644 --- a/easy_rec/python/test/loss_test.py +++ b/easy_rec/python/test/loss_test.py @@ -34,12 +34,11 @@ def test_softmax_loss_with_negative_mining(self): [-0.7, 0.85, 0.03], [0.18, 0.89, -0.3]]) label = tf.constant([1, 1, 0, 0, 1, 1]) - tf.random.set_random_seed(1) loss = softmax_loss_with_negative_mining( - user_emb, item_emb, label, num_negative_samples=2) + user_emb, item_emb, label, num_negative_samples=2, seed=1) with self.test_session() as sess: loss_val = sess.run(loss) - self.assertAlmostEqual(loss_val, 0.76977473, delta=1e-5) + self.assertAlmostEqual(loss_val, 0.48577175, delta=1e-5) def test_circle_loss(self): print('test_circle_loss') diff --git a/requirements/docs.txt b/requirements/docs.txt index 4bcae04df..ac07ea232 100644 --- a/requirements/docs.txt +++ b/requirements/docs.txt @@ -1,7 +1,7 @@ alabaster>=0.7,<0.8,!=0.7.5 commonmark==0.8.1 +Markdown==3.2.2 recommonmark==0.6.0 sphinx==4.5.0 sphinx_markdown_tables==0.0.15 -Markdown==3.2.2 sphinx_rtd_theme diff --git a/setup.cfg b/setup.cfg index e6d30fc7d..00e26bc28 100644 --- a/setup.cfg +++ b/setup.cfg @@ -10,7 +10,7 @@ multi_line_output = 7 force_single_line = true known_standard_library = setuptools known_first_party = easy_rec -known_third_party = absl,common_io,distutils,future,google,graphlearn,matplotlib,nni,numpy,odps,oss2,pai,pandas,psutil,six,sklearn,sphinx_markdown_tables,sphinx_rtd_theme,tensorflow,yaml +known_third_party = absl,common_io,future,google,graphlearn,matplotlib,nni,numpy,odps,oss2,pai,pandas,psutil,six,sklearn,sphinx_markdown_tables,sphinx_rtd_theme,tensorflow,yaml no_lines_before = LOCALFOLDER default_section = THIRDPARTY skip = easy_rec/python/protos From f41ab5fc293529b75f26425bd0a5998c4b312a58 Mon Sep 17 00:00:00 2001 From: weisu Date: Tue, 26 Jul 2022 00:17:01 +0800 Subject: [PATCH 6/6] [bugfix]: modify bug of tf25. add compact layer_norm layer. --- easy_rec/python/compat/layers.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/easy_rec/python/compat/layers.py b/easy_rec/python/compat/layers.py index 65bf25293..651eefac8 100644 --- a/easy_rec/python/compat/layers.py +++ b/easy_rec/python/compat/layers.py @@ -130,8 +130,7 @@ def layer_norm(inputs, if scale: gamma_collections = get_variable_collections(variables_collections, 'gamma') - from tensorflow.contrib.framework.python.ops import variables - gamma = variables.model_variable( + gamma = model_variable( 'gamma', shape=params_shape, dtype=dtype,