Skip to content

Commit

Permalink
Merge branch 'main' into compress-prerelease
Browse files Browse the repository at this point in the history
  • Loading branch information
rkuester committed Oct 4, 2024
2 parents 487c17a + e3f6dc1 commit f6bd486
Show file tree
Hide file tree
Showing 7 changed files with 26 additions and 15 deletions.
3 changes: 3 additions & 0 deletions .style.yapf
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
[style]
based_on_style = pep8
indent_width = 2
2 changes: 1 addition & 1 deletion CONTRIBUTING.md
Original file line number Diff line number Diff line change
Expand Up @@ -190,7 +190,7 @@ Below are some tips that might be useful and improve the development experience.

```
pip install yapf
yapf log_parser.py -i --style='{based_on_style: pep8, indent_width: 2}'
yapf log_parser.py -i
```

* Add a git hook to check for code style etc. prior to creating a pull request:
Expand Down
2 changes: 1 addition & 1 deletion codegen/build_def.bzl
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ def tflm_inference_library(
srcs = [tflite_model],
outs = [name + ".h", name + ".cc"],
tools = ["//codegen:code_generator"],
cmd = "$(location //codegen:code_generator) " +
cmd = "$(location //codegen:code_generator) --quiet " +
"--model=$< --output_dir=$(RULEDIR) --output_name=%s" % name,
visibility = ["//visibility:private"],
)
Expand Down
22 changes: 20 additions & 2 deletions codegen/code_generator.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,14 +15,14 @@
""" Generates C/C++ source code capable of performing inference for a model. """

import os
import pathlib

from absl import app
from absl import flags
from collections.abc import Sequence

from tflite_micro.codegen import inference_generator
from tflite_micro.codegen import graph
from tflite_micro.tensorflow.lite.tools import flatbuffer_utils

# Usage information:
# Default:
Expand All @@ -48,15 +48,33 @@
"'model' basename."),
required=False)

_QUIET = flags.DEFINE_bool(
name="quiet",
default=False,
    help="Suppress informational output (e.g., for use in a build system)",
required=False)


def main(argv: Sequence[str]) -> None:
if _QUIET.value:
restore = os.environ.get("TF_CPP_MIN_LOG_LEVEL", "0")
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
from tflite_micro.tensorflow.lite.tools import flatbuffer_utils
os.environ["TF_CPP_MIN_LOG_LEVEL"] = restore
else:
from tflite_micro.tensorflow.lite.tools import flatbuffer_utils

output_dir = _OUTPUT_DIR.value or os.path.dirname(_MODEL_PATH.value)
output_name = _OUTPUT_NAME.value or os.path.splitext(
os.path.basename(_MODEL_PATH.value))[0]

model = flatbuffer_utils.read_model(_MODEL_PATH.value)

print("Generating inference code for model: {}".format(_MODEL_PATH.value))
if not _QUIET.value:
print("Generating inference code for model: {}".format(_MODEL_PATH.value))
output_path = pathlib.Path(output_dir) / output_name
print(f"Generating {output_path}.h")
print(f"Generating {output_path}.cc")

inference_generator.generate(output_dir, output_name,
graph.OpCodeTable([model]), graph.Graph(model))
Expand Down
1 change: 0 additions & 1 deletion codegen/inference_generator.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,6 @@ class ModelData(TypedDict):

def _render(output_file: pathlib.Path, template_file: pathlib.Path,
model_data: ModelData) -> None:
print("Generating {}".format(output_file))
t = template.Template(filename=str(template_file))
with output_file.open('w+') as file:
file.write(t.render(**model_data))
Expand Down
2 changes: 1 addition & 1 deletion tensorflow/lite/micro/docs/compression.md
Original file line number Diff line number Diff line change
Expand Up @@ -129,7 +129,7 @@ value: 2 4 4 10 1 7 99 10 2 4
### Per-channel Quantized Tensor Value Tables

For per-channel quantized tensors, a `value table` is present for each channel.
All of the the `value tables` are concatenated together into a single contiguous
All of the `value tables` are concatenated together into a single contiguous
set of values. The number of elements in each `value table` is always identical,
with zero value padding added to the end of a `value table` as necessary.

Expand Down
9 changes: 0 additions & 9 deletions tensorflow/lite/micro/tools/make/pigweed.patch
Original file line number Diff line number Diff line change
Expand Up @@ -14,15 +14,6 @@ diff --git a/pw_presubmit/py/pw_presubmit/format_code.py b/pw_presubmit/py/pw_pr
index 19d09546..c1ff6b5a 100755
--- a/pw_presubmit/py/pw_presubmit/format_code.py
+++ b/pw_presubmit/py/pw_presubmit/format_code.py
@@ -142,7 +142,7 @@ def fix_go_format(files: Iterable[Path]) -> None:


def _yapf(*args, **kwargs) -> subprocess.CompletedProcess:
- return log_run(['python', '-m', 'yapf', '--parallel', *args],
+ return log_run(['python', '-m', 'yapf', '--style', '{based_on_style:pep8,indent_width:2}', '--parallel', *args],
capture_output=True,
**kwargs)

@@ -229,11 +229,6 @@ def print_format_check(errors: Dict[Path, str],
except ValueError:
return Path(path).resolve()
Expand Down

0 comments on commit f6bd486

Please sign in to comment.