diff --git a/.github/workflows/examples-efr32.yaml b/.github/workflows/examples-efr32.yaml
index 2b73a924b26ee5..d98671cf85e350 100644
--- a/.github/workflows/examples-efr32.yaml
+++ b/.github/workflows/examples-efr32.yaml
@@ -30,6 +30,9 @@ jobs:
         env:
             EFR32_BOARD: BRD4161A
             BUILD_TYPE: gn_efr32
+            GH_EVENT_PR: ${{ github.event_name == 'pull_request' && github.event.number || 0 }}
+            GH_EVENT_HASH: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+            GH_EVENT_PARENT: ${{ github.event_name == 'pull_request' && github.event.pull_request.base.sha || github.event.before }}
 
         runs-on: ubuntu-latest
         if: github.actor != 'restyled-io[bot]'
@@ -57,25 +60,29 @@ jobs:
                       .environment/pigweed-venv/*.log
             - name: Build example EFR32 Lock App for BRD4161A
               timeout-minutes: 10
-              run:
-                  scripts/examples/gn_efr32_example.sh examples/lock-app/efr32/
-                  out/lock_app_debug BRD4161A
+              run: |
+                  scripts/examples/gn_efr32_example.sh examples/lock-app/efr32/ out/lock_app_debug BRD4161A
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py efr32 BRD4161A lock-app \
+                      out/lock_app_debug/BRD4161A/chip-efr32-lock-example.out /tmp/bloat_reports/
             - name: Build example EFR32 Lighting App for BRD4161A
               timeout-minutes: 10
-              run:
-                  scripts/examples/gn_efr32_example.sh
-                  examples/lighting-app/efr32/ out/lighting_app_debug BRD4161A
+              run: |
+                  scripts/examples/gn_efr32_example.sh examples/lighting-app/efr32/ out/lighting_app_debug BRD4161A
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py efr32 BRD4161A lighting-app \
+                      out/lighting_app_debug/BRD4161A/chip-efr32-lighting-example.out /tmp/bloat_reports/
             - name: Build example EFR32 Lighting App for BRD4161A with RPCs
               timeout-minutes: 10
-              run:
-                  scripts/examples/gn_efr32_example.sh
-                  examples/lighting-app/efr32/ out/lighting_app_debug_rpc BRD4161A
-                  -args='import("//with_pw_rpc.gni")'
+              run: |
+                  scripts/examples/gn_efr32_example.sh examples/lighting-app/efr32/ out/lighting_app_debug_rpc BRD4161A \
+                      -args='import("//with_pw_rpc.gni")'
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py efr32 BRD4161A+rpc lighting-app \
+                      out/lighting_app_debug_rpc/BRD4161A/chip-efr32-lighting-example.out /tmp/bloat_reports/
             - name: Build example EFR32 Window Covering for BRD4161A
               timeout-minutes: 10
-              run:
-                  scripts/examples/gn_efr32_example.sh examples/window-app/efr32/
-                  out/window_app_debug BRD4161A
+              run: |
+                  scripts/examples/gn_efr32_example.sh examples/window-app/efr32/ out/window_app_debug BRD4161A
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py efr32 BRD4161A window-app \
+                      out/window_app_debug/BRD4161A/chip-efr32-window-example.out /tmp/bloat_reports/
             - name: Binary artifact suffix
               id: outsuffix
               uses: haya14busa/action-cond@v1.0.0
@@ -94,3 +101,9 @@ jobs:
                       out/lock_app_debug/BRD4161A/chip-efr32-lock-example.out.map
                       out/lighting_app_debug_rpc/BRD4161A/chip-efr32-lighting-example.out
                       out/lighting_app_debug_rpc/BRD4161A/chip-efr32-lighting-example.out.map
+            - name: Uploading Size Reports
+              uses: actions/upload-artifact@v2
+              with:
+                  name: Size,EFR32-Examples,${{ env.GH_EVENT_PR }},${{ env.GH_EVENT_HASH }},${{ env.GH_EVENT_PARENT }}
+                  path: |
+                      /tmp/bloat_reports/
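The three GH_EVENT_* values select pull-request metadata when available and fall back to push-event values otherwise; they are then baked into the upload name "Size,EFR32-Examples,<pr>,<hash>,<parent>" so that the reporting side can pair each build with its parent commit. A minimal sketch of how such a name decomposes, mirroring the split used by gh_report.py later in this change (the hash values are the documentation examples from gh_sizes.py):

    # Sketch only: unpack an artifact name of the form Size,{group},{pr},{hash},{parent}.
    name = ('Size,EFR32-Examples,12345,'
            '496620796f752063616e20726561642074686973,'
            '20796f752061726520746f6f20636c6f73652e0a')
    _, group, pr, commit, parent, *_ = (name + ',').split(',', 5)
    assert group == 'EFR32-Examples' and pr == '12345'
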
diff --git a/.github/workflows/examples-esp32.yaml b/.github/workflows/examples-esp32.yaml
index 1cce1a171e9083..dfb4cf96f42243 100644
--- a/.github/workflows/examples-esp32.yaml
+++ b/.github/workflows/examples-esp32.yaml
@@ -30,6 +30,9 @@ jobs:
 
         env:
             BUILD_TYPE: esp32
+            GH_EVENT_PR: ${{ github.event_name == 'pull_request' && github.event.number || 0 }}
+            GH_EVENT_HASH: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+            GH_EVENT_PARENT: ${{ github.event_name == 'pull_request' && github.event.pull_request.base.sha || github.event.before }}
 
         runs-on: ubuntu-latest
         if: github.actor != 'restyled-io[bot]'
@@ -64,6 +67,10 @@ jobs:
                   mkdir -p example_binaries/$BUILD_TYPE-build
                   cp examples/all-clusters-app/esp32/build/chip-all-clusters-app.elf \
                      example_binaries/$BUILD_TYPE-build/chip-all-clusters-app.elf
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py \
+                      esp32 m5stack all-clusters-app \
+                      example_binaries/$BUILD_TYPE-build/chip-all-clusters-app.elf \
+                      /tmp/bloat_reports/
             - name: Build example All Clusters App C3
               timeout-minutes: 10
               run: scripts/examples/esp_example.sh all-clusters-app sdkconfig_c3devkit.defaults
@@ -72,6 +79,10 @@ jobs:
                   mkdir -p example_binaries/$BUILD_TYPE-build
                   cp examples/all-clusters-app/esp32/build/chip-all-clusters-app.elf \
                      example_binaries/$BUILD_TYPE-build/chip-all-clusters-app.elf
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py \
+                      esp32 c3devkit all-clusters-app \
+                      example_binaries/$BUILD_TYPE-build/chip-all-clusters-app.elf \
+                      /tmp/bloat_reports/
             - name: Build example Pigweed App
               timeout-minutes: 5
               run: scripts/examples/esp_example.sh pigweed-app sdkconfig.defaults
@@ -80,6 +91,10 @@ jobs:
                   mkdir -p example_binaries/$BUILD_TYPE-build
                   cp examples/pigweed-app/esp32/build/chip-pigweed-app.elf \
                      example_binaries/$BUILD_TYPE-build/chip-pigweed-app.elf
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py \
+                      esp32 default pigweed-app \
+                      example_binaries/$BUILD_TYPE-build/chip-pigweed-app.elf \
+                      /tmp/bloat_reports/
             - name: Build example Lock App
               timeout-minutes: 5
               run: scripts/examples/esp_example.sh lock-app sdkconfig.defaults
@@ -88,6 +103,10 @@ jobs:
                   mkdir -p example_binaries/$BUILD_TYPE-build
                   cp examples/lock-app/esp32/build/chip-lock-app.elf \
                      example_binaries/$BUILD_TYPE-build/chip-lock-app.elf
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py \
+                      esp32 default lock-app \
+                      example_binaries/$BUILD_TYPE-build/chip-lock-app.elf \
+                      /tmp/bloat_reports/
             - name: Build example Bridge App
               timeout-minutes: 5
               run: scripts/examples/esp_example.sh bridge-app
@@ -96,6 +115,10 @@ jobs:
                   mkdir -p example_binaries/$BUILD_TYPE-build
                   cp examples/bridge-app/esp32/build/chip-bridge-app.elf \
                      example_binaries/$BUILD_TYPE-build/chip-bridge-app.elf
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py \
+                      esp32 default bridge-app \
+                      example_binaries/$BUILD_TYPE-build/chip-bridge-app.elf \
+                      /tmp/bloat_reports/
             - name: Build example Persistent Storage App
               timeout-minutes: 5
               run: scripts/examples/esp_example.sh persistent-storage sdkconfig.defaults
@@ -104,6 +127,10 @@ jobs:
                   mkdir -p example_binaries/$BUILD_TYPE-build
                   cp examples/persistent-storage/esp32/build/chip-persistent-storage.elf \
                      example_binaries/$BUILD_TYPE-build/chip-persistent-storage.elf
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py \
+                      esp32 default persistent-storage \
+                      example_binaries/$BUILD_TYPE-build/chip-persistent-storage.elf \
+                      /tmp/bloat_reports/
             - name: Build example Shell App
               timeout-minutes: 5
               run: scripts/examples/esp_example.sh shell sdkconfig.defaults
@@ -112,6 +139,10 @@ jobs:
                   mkdir -p example_binaries/$BUILD_TYPE-build
                   cp examples/shell/esp32/build/chip-shell.elf \
                      example_binaries/$BUILD_TYPE-build/chip-shell.elf
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py \
+                      esp32 default shell \
+                      example_binaries/$BUILD_TYPE-build/chip-shell.elf \
+                      /tmp/bloat_reports/
             - name: Build example Temperature Measurement App
               timeout-minutes: 5
               run: scripts/examples/esp_example.sh temperature-measurement-app sdkconfig.optimize.defaults
@@ -120,6 +151,10 @@ jobs:
                   mkdir -p example_binaries/$BUILD_TYPE-build
                   cp examples/temperature-measurement-app/esp32/build/chip-temperature-measurement-app.elf \
                      example_binaries/$BUILD_TYPE-build/chip-temperature-measurement-app.elf
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py \
+                      esp32 optimize temperature-measurement-app \
+                      example_binaries/$BUILD_TYPE-build/chip-temperature-measurement-app.elf \
+                      /tmp/bloat_reports/
             - name: Build example IPv6 Only App
               timeout-minutes: 5
               run: scripts/examples/esp_example.sh ipv6only-app sdkconfig.defaults
@@ -128,6 +163,10 @@ jobs:
                   mkdir -p example_binaries/$BUILD_TYPE-build
                   cp examples/ipv6only-app/esp32/build/chip-ipv6only-app.elf \
                      example_binaries/$BUILD_TYPE-build/chip-ipv6only-app.elf
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py \
+                      esp32 default ipv6only-app \
+                      example_binaries/$BUILD_TYPE-build/chip-ipv6only-app.elf \
+                      /tmp/bloat_reports/
             - name: Binary artifact suffix
               id: outsuffix
               uses: haya14busa/action-cond@v1.0.0
@@ -145,3 +184,8 @@ jobs:
                       ${{ env.BUILD_TYPE }}-example-build-${{ steps.outsuffix.outputs.value }}
                   path: /tmp/output_binaries/${{ env.BUILD_TYPE }}-build
 
+            - name: Uploading Size Reports
+              uses: actions/upload-artifact@v2
+              with:
+                  name: Size,ESP32-Examples,${{ env.GH_EVENT_PR }},${{ env.GH_EVENT_HASH }},${{ env.GH_EVENT_PARENT }}
+                  path: /tmp/bloat_reports/
diff --git a/.github/workflows/examples-infineon.yaml b/.github/workflows/examples-infineon.yaml
index 196fc434169a77..033cd8ee7a0311 100644
--- a/.github/workflows/examples-infineon.yaml
+++ b/.github/workflows/examples-infineon.yaml
@@ -28,6 +28,11 @@ jobs:
         name: Infineon examples building
         timeout-minutes: 30
 
+        env:
+            GH_EVENT_PR: ${{ github.event_name == 'pull_request' && github.event.number || 0 }}
+            GH_EVENT_HASH: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+            GH_EVENT_PARENT: ${{ github.event_name == 'pull_request' && github.event.pull_request.base.sha || github.event.before }}
+
         runs-on: ubuntu-latest
         if: github.actor != 'restyled-io[bot]'
@@ -55,4 +60,13 @@ jobs:
               run: |
                   scripts/run_in_build_env.sh \
                       "scripts/build/build_examples.py --no-log-timestamps --platform infineon --app lock build"
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py \
+                      p6 default lock-app \
+                      out/infineon-p6board-lock/chip-p6-lock-example.out
+            - name: Uploading Size Reports
+              uses: actions/upload-artifact@v2
+              with:
+                  name: Size,P6-Examples,${{ env.GH_EVENT_PR }},${{ env.GH_EVENT_HASH }},${{ env.GH_EVENT_PARENT }}
+                  path: |
+                      out/infineon-p6board-lock/p6-default-lock-app-sizes.json
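Unlike the other platforms, the Infineon step passes no output directory to gh_sizes.py, so the report falls back to the default of {platform}-{config}-{target}-sizes.json next to the binary (per the gh_sizes.py docstring later in this change), which is how the uploaded path out/infineon-p6board-lock/p6-default-lock-app-sizes.json comes about. A sketch of that default-name rule, assuming only the documented inputs:

    import pathlib

    def default_report_path(platform, config, target, binary):
        # Default: {platform}-{config}-{target}-sizes.json in the binary's directory.
        return pathlib.Path(binary).parent / f'{platform}-{config}-{target}-sizes.json'

    assert default_report_path(
        'p6', 'default', 'lock-app',
        'out/infineon-p6board-lock/chip-p6-lock-example.out'
    ) == pathlib.Path('out/infineon-p6board-lock/p6-default-lock-app-sizes.json')
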
diff --git a/.github/workflows/examples-k32w.yaml b/.github/workflows/examples-k32w.yaml
index 11058a5a61129d..6e337556fb9947 100644
--- a/.github/workflows/examples-k32w.yaml
+++ b/.github/workflows/examples-k32w.yaml
@@ -29,6 +29,9 @@ jobs:
         env:
             BUILD_TYPE: gn_k32w
+            GH_EVENT_PR: ${{ github.event_name == 'pull_request' && github.event.number || 0 }}
+            GH_EVENT_HASH: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+            GH_EVENT_PARENT: ${{ github.event_name == 'pull_request' && github.event.pull_request.base.sha || github.event.before }}
 
         runs-on: ubuntu-latest
         if: github.actor != 'restyled-io[bot]'
@@ -56,16 +59,28 @@ jobs:
                       .environment/pigweed-venv/*.log
             - name: Build example K32W Lock App
               timeout-minutes: 5
-              run: scripts/examples/k32w_example.sh
-                  examples/lock-app/k32w out/lock_app_debug
+              run: |
+                  scripts/examples/k32w_example.sh examples/lock-app/k32w out/lock_app_debug
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py \
+                      k32w k32w061+debug lock-app \
+                      out/lock_app_debug/chip-k32w061-lock-example \
+                      /tmp/bloat_reports/
             - name: Build example K32W Shell App
               timeout-minutes: 5
-              run: scripts/examples/k32w_example.sh
-                  examples/shell/k32w out/shell_app_debug
+              run: |
+                  scripts/examples/k32w_example.sh examples/shell/k32w out/shell_app_debug
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py \
+                      k32w k32w061+debug shell \
+                      out/shell_app_debug/chip-k32w061-shell-example \
+                      /tmp/bloat_reports/
             - name: Build example K32W Lighting App with Secure Element
               timeout-minutes: 5
-              run: scripts/examples/k32w_se_example.sh
-                  examples/lighting-app/k32w out/lighting_app_se_release
+              run: |
+                  scripts/examples/k32w_se_example.sh examples/lighting-app/k32w out/lighting_app_se_release
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py \
+                      k32w k32w061+se05x+release lighting-app \
+                      out/lighting_app_se_release/chip-k32w061-light-example \
+                      /tmp/bloat_reports/
             - name: Binary artifact suffix
               id: outsuffix
               uses: haya14busa/action-cond@v1.0.0
@@ -82,3 +97,9 @@ jobs:
                   path: |
                       out/lock_app_debug/chip-k32w061-lock-example.out
                       out/lock_app_debug/chip-k32w061-lock-example.out.map
+            - name: Uploading Size Reports
+              uses: actions/upload-artifact@v2
+              with:
+                  name: Size,K32W-Examples,${{ env.GH_EVENT_PR }},${{ env.GH_EVENT_HASH }},${{ env.GH_EVENT_PARENT }}
+                  path: |
+                      /tmp/bloat_reports/
diff --git a/.github/workflows/examples-linux-standalone.yaml b/.github/workflows/examples-linux-standalone.yaml
index ff751c8578dc31..1f77a89bbfd807 100644
--- a/.github/workflows/examples-linux-standalone.yaml
+++ b/.github/workflows/examples-linux-standalone.yaml
@@ -29,6 +29,9 @@ jobs:
         env:
             BUILD_TYPE: gn_linux
+            GH_EVENT_PR: ${{ github.event_name == 'pull_request' && github.event.number || 0 }}
+            GH_EVENT_HASH: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+            GH_EVENT_PARENT: ${{ github.event_name == 'pull_request' && github.event.pull_request.base.sha || github.event.before }}
 
         runs-on: ubuntu-latest
         if: github.actor != 'restyled-io[bot]'
@@ -57,36 +60,62 @@ jobs:
                       .environment/pigweed-venv/*.log
             - name: Build example Standalone Echo Client
               timeout-minutes: 5
-              run:
-                  scripts/examples/gn_build_example.sh examples/chip-tool
-                  out/chip_tool_debug
+              run: |
+                  scripts/examples/gn_build_example.sh examples/chip-tool out/chip_tool_debug
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py \
+                      linux debug chip-tool \
+                      out/chip_tool_debug/chip-tool \
+                      /tmp/bloat_reports/
             - name: Build example Standalone Shell
               timeout-minutes: 5
-              run:
-                  scripts/examples/gn_build_example.sh examples/shell/standalone
-                  out/shell_debug
+              run: |
+                  scripts/examples/gn_build_example.sh examples/shell/standalone out/shell_debug
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py \
+                      linux debug shell \
+                      out/shell_debug/chip-shell \
+                      /tmp/bloat_reports/
             - name: Build example Standalone All Clusters Server
               timeout-minutes: 5
-              run:
-                  scripts/examples/gn_build_example.sh examples/all-clusters-app/linux
-                  out/all_clusters_debug chip_bypass_rendezvous=true
+              run: |
+                  scripts/examples/gn_build_example.sh examples/all-clusters-app/linux out/all_clusters_debug \
+                      chip_bypass_rendezvous=true
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py \
+                      linux debug all-clusters-app \
+                      out/all_clusters_debug/chip-all-clusters-app \
+                      /tmp/bloat_reports/
             - name: Build example TV app
               timeout-minutes: 5
-              run:
+              run: |
                   scripts/examples/gn_build_example.sh examples/tv-app/linux out/tv_app_debug
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py \
+                      linux debug tv-app \
+                      out/tv_app_debug/chip-tv-app \
+                      /tmp/bloat_reports/
             - name: Build example lighting app with RPCs
               timeout-minutes: 5
-              run:
-                  scripts/examples/gn_build_example.sh examples/lighting-app/linux
-                  out/lighting_app_debug_rpc 'import("//with_pw_rpc.gni")'
+              run: |
+                  scripts/examples/gn_build_example.sh examples/lighting-app/linux out/lighting_app_debug_rpc \
+                      'import("//with_pw_rpc.gni")'
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py \
+                      linux debug+rpc lighting-app \
+                      out/lighting_app_debug_rpc/chip-lighting-app \
+                      /tmp/bloat_reports/
             - name: Build example Standalone Bridge
               timeout-minutes: 5
-              run:
+              run: |
                   scripts/examples/gn_build_example.sh examples/bridge-app/linux out/bridge_debug
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py \
+                      linux debug+rpc bridge-app \
+                      out/bridge_debug/chip-bridge-app \
+                      /tmp/bloat_reports/
             - name: Build example OTA Provider
               timeout-minutes: 5
-              run:
+              run: |
                   scripts/examples/gn_build_example.sh examples/ota-provider-app/linux out/ota_provider_debug
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py \
+                      linux debug ota-provider-app \
+                      out/ota_provider_debug/chip-ota-provider-app \
+                      /tmp/bloat_reports/
             - name: Binary artifact suffix
               id: outsuffix
               uses: haya14busa/action-cond@v1.0.0
@@ -103,3 +132,9 @@ jobs:
                   path: |
                       out/all_clusters_debug/all-clusters-server
                       out/all_clusters_debug/all-clusters-server.map
+            - name: Uploading Size Reports
+              uses: actions/upload-artifact@v2
+              with:
+                  name: Size,Linux-Examples,${{ env.GH_EVENT_PR }},${{ env.GH_EVENT_HASH }},${{ env.GH_EVENT_PARENT }}
+                  path: |
+                      /tmp/bloat_reports/
diff --git a/.github/workflows/examples-mbed.yaml b/.github/workflows/examples-mbed.yaml
index 6cec70a4589264..6df9897a110d99 100644
--- a/.github/workflows/examples-mbed.yaml
+++ b/.github/workflows/examples-mbed.yaml
@@ -32,6 +32,9 @@ jobs:
             BUILD_TYPE: mbedos
             APP_PROFILE: release
             APP_TARGET: CY8CPROTO_062_4343W
+            GH_EVENT_PR: ${{ github.event_name == 'pull_request' && github.event.number || 0 }}
+            GH_EVENT_HASH: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+            GH_EVENT_PARENT: ${{ github.event_name == 'pull_request' && github.event.pull_request.base.sha || github.event.before }}
 
         runs-on: ubuntu-latest
         if: github.actor != 'restyled-io[bot]'
@@ -39,6 +42,7 @@ jobs:
         container:
             image: connectedhomeip/chip-build-mbed-os:latest
             volumes:
+                - "/tmp/bloat_reports:/tmp/bloat_reports"
                 - "/tmp/output_binaries:/tmp/output_binaries"
 
         steps:
@@ -62,11 +66,21 @@ jobs:
 
             - name: Build lock-app example
               timeout-minutes: 10
-              run: scripts/examples/mbed_example.sh -a=lock-app -b=$APP_TARGET -p=$APP_PROFILE
+              run: |
+                  scripts/examples/mbed_example.sh -a=lock-app -b=$APP_TARGET -p=$APP_PROFILE
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py \
+                      mbed $APP_TARGET+$APP_PROFILE lock-app \
+                      examples/lock-app/mbed/build-CY8CPROTO_062_4343W/release/chip-mbed-lock-app-example \
+                      /tmp/bloat_reports/
 
             - name: Build lighting-app example
               timeout-minutes: 10
-              run: scripts/examples/mbed_example.sh -a=lighting-app -b=$APP_TARGET -p=$APP_PROFILE
+              run: |
+                  scripts/examples/mbed_example.sh -a=lighting-app -b=$APP_TARGET -p=$APP_PROFILE
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py \
+                      mbed $APP_TARGET+$APP_PROFILE lighting-app \
+                      examples/lighting-app/mbed/build-CY8CPROTO_062_4343W/release/chip-mbed-lighting-app-example \
+                      /tmp/bloat_reports/
 
             - name: Copy aside build products
               run: |
@@ -91,3 +105,10 @@ jobs:
                       ${{ env.BUILD_TYPE }}-binaries-${{env.APP_TARGET}}-${{ env.APP_PROFILE}}-build-${{ steps.outsuffix.outputs.value }}
                   path: /tmp/output_binaries/${{ env.BUILD_TYPE }}-build
+
+            - name: Uploading Size Reports
+              uses: actions/upload-artifact@v2
+              with:
+                  name: Size,Mbed-Examples,${{ env.GH_EVENT_PR }},${{ env.GH_EVENT_HASH }},${{ env.GH_EVENT_PARENT }}
+                  path: |
+                      /tmp/bloat_reports/
diff --git a/.github/workflows/examples-nrfconnect.yaml b/.github/workflows/examples-nrfconnect.yaml
index 21cbfaeefcd8fb..16bb09a5ee7e9b 100644
--- a/.github/workflows/examples-nrfconnect.yaml
+++ b/.github/workflows/examples-nrfconnect.yaml
@@ -29,6 +29,9 @@ jobs:
         env:
             BUILD_TYPE: nrfconnect
+            GH_EVENT_PR: ${{ github.event_name == 'pull_request' && github.event.number || 0 }}
+            GH_EVENT_HASH: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+            GH_EVENT_PARENT: ${{ github.event_name == 'pull_request' && github.event.pull_request.base.sha || github.event.before }}
 
         runs-on: ubuntu-latest
         if: github.actor != 'restyled-io[bot]'
@@ -60,37 +63,87 @@ jobs:
               run: scripts/run_in_build_env.sh "python3 scripts/setup/nrfconnect/update_ncs.py --update --shallow"
             - name: Build example nRF Connect SDK Lock App on nRF52840 DK
               timeout-minutes: 10
-              run: scripts/examples/nrfconnect_example.sh lock-app nrf52840dk_nrf52840
+              run: |
+                  scripts/examples/nrfconnect_example.sh lock-app nrf52840dk_nrf52840
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py \
+                      nrfconnect nrf52840dk_nrf52840 lock-app \
+                      examples/lock-app/nrfconnect/build/nrf52840dk_nrf52840/zephyr/zephyr.elf \
+                      /tmp/bloat_reports/
             - name: Build example nRF Connect SDK Lighting App on nRF52840 DK
               timeout-minutes: 10
-              run: scripts/examples/nrfconnect_example.sh lighting-app nrf52840dk_nrf52840
+              run: |
+                  scripts/examples/nrfconnect_example.sh lighting-app nrf52840dk_nrf52840
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py \
+                      nrfconnect nrf52840dk_nrf52840 lighting-app \
+                      examples/lighting-app/nrfconnect/build/nrf52840dk_nrf52840/zephyr/zephyr.elf \
+                      /tmp/bloat_reports/
             - name: Build example nRF Connect SDK Lighting App on nRF52840 DK with RPC
               timeout-minutes: 10
-              run: scripts/examples/nrfconnect_example.sh lighting-app nrf52840dk_nrf52840 -DOVERLAY_CONFIG=rpc.overlay
+              run: |
+                  scripts/examples/nrfconnect_example.sh lighting-app nrf52840dk_nrf52840 -DOVERLAY_CONFIG=rpc.overlay
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py \
+                      nrfconnect nrf52840dk_nrf52840+rpc lighting-app \
+                      examples/lighting-app/nrfconnect/build/nrf52840dk_nrf52840/zephyr/zephyr.elf \
+                      /tmp/bloat_reports/
             - name: Build example nRF Connect SDK Shell on nRF52840 DK
               timeout-minutes: 10
-              run: scripts/examples/nrfconnect_example.sh shell nrf52840dk_nrf52840
+              run: |
+                  scripts/examples/nrfconnect_example.sh shell nrf52840dk_nrf52840
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py \
+                      nrfconnect nrf52840dk_nrf52840 shell \
+                      examples/shell/nrfconnect/build/nrf52840dk_nrf52840/zephyr/zephyr.elf \
+                      /tmp/bloat_reports/
             - name: Build example nRF Connect SDK Pigweed on nRF52840 DK
               timeout-minutes: 10
-              run: scripts/examples/nrfconnect_example.sh pigweed-app nrf52840dk_nrf52840
+              run: |
+                  scripts/examples/nrfconnect_example.sh pigweed-app nrf52840dk_nrf52840
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py \
+                      nrfconnect nrf52840dk_nrf52840 pigweed-app \
+                      examples/pigweed-app/nrfconnect/build/nrf52840dk_nrf52840/zephyr/zephyr.elf \
+                      /tmp/bloat_reports/
             - name: Build example nRF Connect SDK Lock App on nRF5340 DK
               timeout-minutes: 10
-              run: scripts/examples/nrfconnect_example.sh lock-app nrf5340dk_nrf5340_cpuapp
+              run: |
+                  scripts/examples/nrfconnect_example.sh lock-app nrf5340dk_nrf5340_cpuapp
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py \
+                      nrfconnect nrf5340dk_nrf5340_cpuapp lock-app \
+                      examples/lock-app/nrfconnect/build/nrf5340dk_nrf5340_cpuapp/zephyr/zephyr.elf \
+                      /tmp/bloat_reports/
             - name: Build example nRF Connect SDK Lighting App on nRF5340 DK
               timeout-minutes: 10
-              run: scripts/examples/nrfconnect_example.sh lighting-app nrf5340dk_nrf5340_cpuapp
+              run: |
+                  scripts/examples/nrfconnect_example.sh lighting-app nrf5340dk_nrf5340_cpuapp
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py \
+                      nrfconnect nrf5340dk_nrf5340_cpuapp lighting-app \
+                      examples/lighting-app/nrfconnect/build/nrf5340dk_nrf5340_cpuapp/zephyr/zephyr.elf \
+                      /tmp/bloat_reports/
             - name: Build example nRF Connect SDK Shell on nRF5340 DK
               timeout-minutes: 10
-              run: scripts/examples/nrfconnect_example.sh shell nrf5340dk_nrf5340_cpuapp
+              run: |
+                  scripts/examples/nrfconnect_example.sh shell nrf5340dk_nrf5340_cpuapp
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py \
+                      nrfconnect nrf5340dk_nrf5340_cpuapp shell \
+                      examples/shell/nrfconnect/build/nrf5340dk_nrf5340_cpuapp/zephyr/zephyr.elf \
+                      /tmp/bloat_reports/
             - name: Build example nRF Connect SDK Pump App on nRF52840 DK
-              timeout-minutes: 5
-              run: scripts/examples/nrfconnect_example.sh pump-app nrf52840dk_nrf52840
+              timeout-minutes: 10
+              run: |
+                  scripts/examples/nrfconnect_example.sh pump-app nrf52840dk_nrf52840
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py \
+                      nrfconnect nrf52840dk_nrf52840 pump-app \
+                      examples/pump-app/nrfconnect/build/nrf52840dk_nrf52840/zephyr/zephyr.elf \
+                      /tmp/bloat_reports/
             - name: Build example nRF Connect SDK Pump Controller App on nRF52840 DK
-              timeout-minutes: 5
-              run: scripts/examples/nrfconnect_example.sh pump-controller-app nrf52840dk_nrf52840
+              timeout-minutes: 10
+              run: |
+                  scripts/examples/nrfconnect_example.sh pump-controller-app nrf52840dk_nrf52840
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py \
+                      nrfconnect nrf52840dk_nrf52840 pump-controller-app \
+                      examples/pump-controller-app/nrfconnect/build/nrf52840dk_nrf52840/zephyr/zephyr.elf \
+                      /tmp/bloat_reports/
             - name: Run unit tests for Zephyr native_posix_64 platform
               timeout-minutes: 10
-              run:
+              run: |
                   scripts/run_in_build_env.sh "scripts/tests/nrfconnect_native_posix_tests.sh native_posix_64"
             - name: Copy aside build products
               run: |
@@ -113,3 +166,9 @@ jobs:
                       ${{ env.BUILD_TYPE }}-example-build-${{ steps.outsuffix.outputs.value }}
                   path: /tmp/output_binaries/${{ env.BUILD_TYPE }}-build
+            - name: Uploading Size Reports
+              uses: actions/upload-artifact@v2
+              with:
+                  name: Size,nRFConnect-Examples,${{ env.GH_EVENT_PR }},${{ env.GH_EVENT_HASH }},${{ env.GH_EVENT_PARENT }}
+                  path: |
+                      /tmp/bloat_reports/
diff --git a/.github/workflows/examples-qpg.yaml b/.github/workflows/examples-qpg.yaml
index 50518558390431..94e71a2df49d15 100644
--- a/.github/workflows/examples-qpg.yaml
+++ b/.github/workflows/examples-qpg.yaml
@@ -29,6 +29,9 @@ jobs:
         env:
             BUILD_TYPE: gn_qpg
+            GH_EVENT_PR: ${{ github.event_name == 'pull_request' && github.event.number || 0 }}
+            GH_EVENT_HASH: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+            GH_EVENT_PARENT: ${{ github.event_name == 'pull_request' && github.event.pull_request.base.sha || github.event.before }}
 
         runs-on: ubuntu-latest
         if: github.actor != 'restyled-io[bot]'
@@ -56,16 +59,29 @@ jobs:
                       .environment/pigweed-venv/*.log
             - name: Build example QPG6100 Lock App
              timeout-minutes: 5
-              run: scripts/examples/gn_build_example.sh
-                  examples/lock-app/qpg out/lock_app_debug qpg_target_ic=\"qpg6100\"
+              run: |
+                  scripts/examples/gn_build_example.sh examples/lock-app/qpg out/lock_app_debug qpg_target_ic=\"qpg6100\"
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py \
+                      qpg qpg6100+debug lock-app \
+                      out/lock_app_debug/chip-qpg6100-lock-example.out \
+                      /tmp/bloat_reports/
             - name: Build example QPG6100 Lighting App
               timeout-minutes: 5
-              run: scripts/examples/gn_build_example.sh
-                  examples/lighting-app/qpg out/lighting_app_debug qpg_target_ic=\"qpg6100\"
+              run: |
+                  scripts/examples/gn_build_example.sh examples/lighting-app/qpg out/lighting_app_debug qpg_target_ic=\"qpg6100\"
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py \
+                      qpg qpg6100+debug lighting-app \
+                      out/lighting_app_debug/chip-qpg6100-lighting-example.out \
+                      /tmp/bloat_reports/
             - name: Build example QPG6100 persistent-storage
               timeout-minutes: 5
-              run: scripts/examples/gn_build_example.sh
-                  examples/persistent-storage/qpg out/persistent-storage_app_debug qpg_target_ic=\"qpg6100\"
+              run: |
+                  scripts/examples/gn_build_example.sh examples/persistent-storage/qpg out/persistent-storage_app_debug \
+                      qpg_target_ic=\"qpg6100\"
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py \
+                      qpg qpg6100+debug persistent-storage-app \
+                      out/persistent-storage_app_debug/chip-qpg6100-persistent_storage-example.out \
+                      /tmp/bloat_reports/
             - name: Binary artifact suffix
               id: outsuffix
               uses: haya14busa/action-cond@v1.0.0
@@ -82,3 +98,9 @@ jobs:
                   path: |
                       out/lighting_app_debug/chip-qpg6100-lighting-example.out
                       out/lighting_app_debug/chip-qpg6100-lighting-example.out.map
+            - name: Uploading Size Reports
+              uses: actions/upload-artifact@v2
+              with:
+                  name: Size,QPG-Examples,${{ env.GH_EVENT_PR }},${{ env.GH_EVENT_HASH }},${{ env.GH_EVENT_PARENT }}
+                  path: |
+                      /tmp/bloat_reports/
diff --git a/.github/workflows/examples-telink.yaml b/.github/workflows/examples-telink.yaml
index dd971792abc77d..b279f8f235c8cd 100644
--- a/.github/workflows/examples-telink.yaml
+++ b/.github/workflows/examples-telink.yaml
@@ -27,6 +27,9 @@ jobs:
         name: Telink
         env:
             BUILD_TYPE: telink
+            GH_EVENT_PR: ${{ github.event_name == 'pull_request' && github.event.number || 0 }}
+            GH_EVENT_HASH: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+            GH_EVENT_PARENT: ${{ github.event_name == 'pull_request' && github.event.pull_request.base.sha || github.event.before }}
 
         runs-on: ubuntu-latest
         if: github.actor != 'restyled-io[bot]'
@@ -41,7 +44,17 @@ jobs:
               uses: actions/checkout@v2
               with:
                   submodules: true
-            - name: Build example Telink Lighting App
+            - name: Build example Telink Lighting App
               run: |
                   ./scripts/run_in_build_env.sh \
                       "./scripts/build/build_examples.py --no-log-timestamps --platform telink --app light build"
+                  .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py \
+                      telink tlsr9518adk80d lighting-app \
+                      out/telink-tlsr9518adk80d-light/zephyr/zephyr.elf \
+                      /tmp/bloat_reports/
+            - name: Uploading Size Reports
+              uses: actions/upload-artifact@v2
+              with:
+                  name: Size,Telink-Examples,${{ env.GH_EVENT_PR }},${{ env.GH_EVENT_HASH }},${{ env.GH_EVENT_PARENT }}
+                  path: |
+                      /tmp/bloat_reports/
diff --git a/scripts/helpers/bloat_check.py b/scripts/helpers/bloat_check.py
index 52b7d0f46047c5..db1157008d2a9f 100755
--- a/scripts/helpers/bloat_check.py
+++ b/scripts/helpers/bloat_check.py
@@ -268,6 +268,10 @@ def main():
     pull_artifact_re = re.compile('^(.*)-pull-(\\d+)$')
     binary_count = 0
     for a in artifacts:
+        # Ignore size reports; they are handled by a separate script.
+        if a.name.startswith('Size,'):
+            continue
+
         # logs cleanup after 3 days
         is_log = a.name.endswith('-logs')
 
diff --git a/scripts/requirements.txt b/scripts/requirements.txt
index edcd7ae09b9348..7905aff72b6862 100644
--- a/scripts/requirements.txt
+++ b/scripts/requirements.txt
@@ -41,6 +41,7 @@ protobuf
 # scripts/tools/memory
 anytree
 cxxfilt
+ghapi
 pandas ; platform_machine != 'aarch64'
 
 # scripts/build
diff --git a/scripts/tools/memory/collect.py b/scripts/tools/memory/collect.py
index 03f76e3b672c99..5c0a48e39183d2 100755
--- a/scripts/tools/memory/collect.py
+++ b/scripts/tools/memory/collect.py
@@ -36,6 +36,7 @@ def main(argv):
     try:
         config = memdf.collect.parse_args({
             **memdf.select.CONFIG,
+            **memdf.report.REPORT_DEMANGLE_CONFIG,
            **memdf.report.OUTPUT_CONFIG
         }, argv)
         memdf.report.write_dfs(config, memdf.collect.collect_files(config))
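The new ghapi requirement above is what the reporting script that follows uses to enumerate workflow artifacts. A minimal sketch of that access pattern, limited to the ghapi calls that actually appear in gh_report.py below (owner, repo, and the token placeholder are illustrative):

    import ghapi.all

    gh = ghapi.all.GhApi(owner='project-chip', repo='connectedhomeip', token='...')
    for page in ghapi.all.paged(gh.actions.list_artifacts_for_repo):
        if not page.artifacts:
            break
        for artifact in page.artifacts:
            # Size report artifacts are distinguished by their name prefix.
            if artifact.name.startswith('Size,'):
                print(artifact.id, artifact.name)
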
+# +"""Generate reports from size artifacts.""" + +import io +import itertools +import json +import logging +import os +import os.path +import sqlite3 +import sys +import zipfile + +from pathlib import Path +from typing import Dict, IO, Iterable, Optional, Union + +import dateutil # type: ignore +import fastcore # type: ignore +import ghapi.all # type: ignore +import pandas as pd # type: ignore + +import memdf.report +import memdf.util.config +import memdf.util.sqlite +from memdf import Config, ConfigDescription + +GITHUB_CONFIG: ConfigDescription = { + Config.group_def('github'): { + 'title': 'github options', + }, + 'github.token': { + 'help': 'Github API token, or "SKIP" to suppress connecting to github', + 'metavar': 'TOKEN', + 'default': '', + 'argparse': { + 'alias': ['--github-api-token', '--token'], + }, + }, + 'github.repository': { + 'help': 'Github repostiory', + 'metavar': 'OWNER/REPO', + 'default': '', + 'argparse': { + 'alias': ['--repo'], + }, + }, + 'github.comment': { + 'help': 'Send output as github PR comments', + 'default': False, + 'argparse': { + 'alias': ['--comment'], + }, + }, + 'github.keep': { + 'help': 'Leave PR artifacts after commenting', + 'default': False, + 'argparse': { + 'alias': ['--keep'], + }, + }, + Config.group_map('report'): { + 'group': 'output' + }, + 'report.pr': { + 'help': 'Report on pull requests', + 'default': False, + 'argparse': { + 'alias': ['--pr', '--pull-request'], + }, + }, + 'report.push': { + 'help': 'Report on pushes', + 'default': False, + 'argparse': { + 'alias': ['--push'] + }, + }, + 'report.query': { + 'help': 'Run an SQL query', + 'default': [], + 'argparse': { + 'alias': ['--query', '--sql'] + }, + }, + 'report.increases': { + 'help': 'Highlight large increases', + 'metavar': 'PERCENT', + 'default': 0.0, + 'argparse': { + 'alias': ['--threshold'], + 'type': float, + }, + }, +} + + +class SizeDatabase(memdf.util.sqlite.Database): + """A database for recording and comparing size reports.""" + on_open = ["PRAGMA foreign_keys = ON", "PRAGMA encoding = 'UTF-8'"] + on_writable = [ + """ + -- A ‘thing’ identifies the kind of built object. + -- Builds of the same thing are comparable. + CREATE TABLE IF NOT EXISTS thing ( + id INTEGER PRIMARY KEY, + platform TEXT NOT NULL, -- Build platform + config TEXT NOT NULL, -- Build configuration discriminator + target TEXT NOT NULL, -- Build target + UNIQUE(platform, config, target) + ) + """, """ + -- A ‘build’ identifies a built instance of a thing at some point. + CREATE TABLE IF NOT EXISTS build ( + id INTEGER PRIMARY KEY, + thing_id INTEGER REFERENCES thing(id), + hash TEXT NOT NULL, -- Commit hash + parent TEXT NOT NULL, -- Parent commit hash + pr INTEGER DEFAULT 0, -- Github PR number + time INTEGER NOT NULL, -- Unix-epoch timestamp + artifact INTEGER DEFAULT 0, -- Github artifact ID + commented INTEGER DEFAULT 0, + UNIQUE(thing_id, hash, parent, pr, time, artifact) + ) + """, """ + -- A ‘size’ entry gives the size of a section for a particular build. + CREATE TABLE IF NOT EXISTS size ( + build_id INTEGER REFERENCES build(id), + name TEXT NOT NULL, -- Section name + size INTEGER NOT NULL, -- Section size in bytes + PRIMARY KEY (build_id, name) + ) + """ + ] + + def __init__(self, config: Config): + super().__init__(config['database.file']) + self.config = config + self.gh = gh_open(config) + self.deleted_artifacts: set[int] = set() + + def add_sizes(self, **kwargs): + """ + Add a size report to the database. 
+
+        The incoming arguments must contain the non-ID column names from
+        ‘thing’ and ‘build’ tables, plus a 'sizes' entry that is a sequence
+        of mappings containing 'name' and 'size'.
+        """
+        td = {k: kwargs[k] for k in ('platform', 'config', 'target')}
+        thing = self.store_and_return_id('thing', **td)
+        bd = {k: kwargs[k] for k in ('hash', 'parent', 'time')}
+        cd = {k: kwargs.get(k, 0) for k in ('pr', 'artifact', 'commented')}
+        build = self.store_and_return_id('build', thing_id=thing, **bd, **cd)
+        for d in kwargs['sizes']:
+            self.store('size', build_id=build, **d)
+
+    def add_sizes_from_json(self, s: Union[bytes, str], origin: Dict):
+        """Add sizes from a JSON size report."""
+        r = origin.copy()
+        r.update(json.loads(s))
+        by = r.get('by', 'section')
+        r['sizes'] = [{
+            'name': s[by],
+            'size': s['size']
+        } for s in r['frames'][by]]
+        self.add_sizes(**r)
+
+    def add_sizes_from_zipfile(self, f: Union[IO, Path], origin: Dict):
+        """Add size reports from a zip."""
+        with zipfile.ZipFile(f, 'r') as zip_file:
+            for i in zip_file.namelist():
+                if i.endswith('-sizes.json'):
+                    origin['member'] = i
+                    with zip_file.open(i) as member:
+                        self.add_sizes_from_json(member.read(), origin)
+
+    def add_sizes_from_file(self, filename: str):
+        """Add size reports from a file."""
+        origin = {'file': filename}
+        path = Path(filename)
+        if path.suffix == '.json':
+            logging.info('Reading JSON %s', path)
+            with open(path) as f:
+                self.add_sizes_from_json(f.read(), origin)
+        elif path.suffix == '.zip':
+            logging.info('Reading ZIP %s', path)
+            self.add_sizes_from_zipfile(path, origin)
+        else:
+            logging.warning('Unknown file type "%s" ignored', filename)
+
+    def add_sizes_from_github(self):
+        """Read size report artifacts from github."""
+        if not self.gh:
+            return
+
+        # Size artifacts have names of the form
+        #   Size,{group},{pr},{commit_hash},{parent_hash}
+        # Record them keyed by group and commit_hash to match them up
+        # after we have the entire list.
+        size_artifacts: Dict[str, Dict[str, fastcore.basics.AttrDict]] = {}
+        for i in ghapi.all.paged(self.gh.actions.list_artifacts_for_repo):
+            if not i.artifacts:
+                break
+            for a in i.artifacts:
+                if a.name.startswith('Size,'):
+                    _, group, pr, commit, parent, *_ = (a.name + ',').split(
+                        ',', 5)
+                    a.parent = parent
+                    a.pr = pr
+                    a.created_at = dateutil.parser.isoparse(a.created_at)
+                    if group not in size_artifacts:
+                        size_artifacts[group] = {}
+                    size_artifacts[group][commit] = a
+
+        # Determine required size artifacts.
+        required_artifact_ids: set[int] = set()
+        for group, group_reports in size_artifacts.items():
+            logging.info('Group %s', group)
+            for report in group_reports.values():
+                if self.config['report.pr' if report.pr else 'report.push']:
+                    if report.parent not in group_reports:
+                        logging.info('  No match for %s', report.name)
+                        continue
+                    # We have size information for both this report and its
+                    # parent, so ensure that both artifacts are downloaded.
+                    parent = group_reports[report.parent]
+                    required_artifact_ids.add(report.id)
+                    required_artifact_ids.add(parent.id)
+                    logging.info('  Match %s', report.parent)
+                    logging.info('    %s %s', report.id, report.name)
+                    logging.info('    %s %s', parent.id, parent.name)
+
+        # Download and add required artifacts.
+        for i in required_artifact_ids:
+            logging.debug('Download artifact %d', i)
+            try:
+                blob = self.gh.actions.download_artifact(i, 'zip')
+            except Exception as e:
+                logging.error('Failed to download artifact %d: %s', i, e)
+                continue
+            self.add_sizes_from_zipfile(io.BytesIO(blob), {'artifact': i})
+
+    def read_inputs(self):
+        """Read size reports from github and/or local files."""
+        self.add_sizes_from_github()
+        for filename in self.config['args.inputs']:
+            self.add_sizes_from_file(filename)
+        self.commit()
+
+    def select_matching_commits(self):
+        """Find matching builds, where one's commit is the other's parent."""
+        return self.execute('''
+            SELECT DISTINCT c.pr AS pr, c.hash AS hash, p.hash AS parent
+                FROM build c
+                INNER JOIN build p ON p.hash = c.parent
+                WHERE c.commented = 0
+                ORDER BY c.pr, c.hash, p.hash ASC
+            ''')
+
+    def set_commented(self, build_ids: Iterable[int]):
+        """Set the commented flag for the given builds."""
+        if not build_ids:
+            return
+        for build_id in build_ids:
+            self.execute('UPDATE build SET commented = 1 WHERE id = ?',
+                         (build_id, ))
+        self.commit()
+
+    def delete_stale_builds(self, build_ids: Iterable[int]):
+        """Delete stale builds."""
+        if not build_ids:
+            return
+        for build_id in build_ids:
+            logging.info('Deleting obsolete build %d', build_id)
+            self.execute('DELETE FROM size WHERE build_id = ?', (build_id, ))
+            self.execute('DELETE FROM build WHERE id = ?', (build_id, ))
+        self.commit()
+
+    def delete_artifact(self, artifact_id: int):
+        if self.gh and artifact_id not in self.deleted_artifacts:
+            self.deleted_artifacts.add(artifact_id)
+            self.gh.actions.delete_artifact(artifact_id)
+
+    def delete_stale_artifacts(self, stale_artifacts: Iterable[int]):
+        if not self.config['github.keep']:
+            for artifact_id in stale_artifacts:
+                logging.info('Deleting obsolete artifact %d', artifact_id)
+                self.delete_artifact(artifact_id)
+
+
+def gh_open(config: Config) -> Optional[ghapi.core.GhApi]:
+    """Return a GhApi, if so configured."""
+    gh: Optional[ghapi.core.GhApi] = None
+    if config['github.repository']:
+        owner, repo = config.get('github.repository').split('/', 1)
+        config.put('github.owner', owner)
+        config.put('github.repo', repo)
+        if not config['github.token']:
+            config['github.token'] = os.environ.get('GITHUB_TOKEN')
+        if not config['github.token']:
+            logging.error('Missing --github-token')
+            return None
+        token = config['github.token']
+        if token != 'SKIP':
+            gh = ghapi.all.GhApi(owner=owner,
+                                 repo=repo,
+                                 token=config['github.token'])
+    return gh
+
+
+def gh_get_comments_for_pr(gh: ghapi.core.GhApi, pr: int):
+    return itertools.chain.from_iterable(
+        ghapi.all.paged(gh.issues.list_comments, pr))
+
+
+def percent_change(a: int, b: int) -> float:
+    if a == 0:
+        return 0.0 if b == 0 else float('inf')
+    return 100. * (b - a) / a
+
+
+def changes_for_commit(db: SizeDatabase, pr: int, commit: str,
+                       parent: str) -> pd.DataFrame:
+    """Return a DataFrame with size changes between the given commits."""
+    cur = db.execute(
+        '''
+        SELECT DISTINCT
+            t.id AS thing,
+            cb.artifact AS artifact,
+            pb.id AS parent_build,
+            cb.id AS commit_build,
+            t.platform, t.config, t.target,
+            cs.name,
+            ps.size AS parent_size,
+            cs.size AS commit_size,
+            cs.size - ps.size AS change
+        FROM thing t
+            INNER JOIN build cb ON cb.thing_id = t.id
+            INNER JOIN build pb ON pb.thing_id = t.id AND pb.hash = cb.parent
+            INNER JOIN size cs ON cs.build_id = cb.id
+            INNER JOIN size ps ON ps.build_id = pb.id AND cs.name = ps.name
+        WHERE cb.hash = ? AND pb.hash = ?
+        ORDER BY t.platform, t.config, t.target,
+                 cs.name, cb.time DESC, pb.time DESC
+        ''', (commit, parent))
+
+    keep = ('platform', 'target', 'config', 'name', 'parent_size',
+            'commit_size', 'change')
+    things: set[int] = set()
+    artifacts: set[int] = set()
+    builds: set[int] = set()
+    stale_builds: set[int] = set()
+    stale_artifacts: set[int] = set()
+    previous: Optional[sqlite3.Row] = None
+    rows = []
+
+    for row in cur.fetchall():
+        row = sqlite3.Row(cur, row)
+        things.add(row['thing'])
+        if (previous is not None and row['thing'] == previous['thing']
+                and row['name'] == previous['name']):
+            # This is a duplicate build, older because we sort descending,
+            # presumably from a partial workflow re-run.
+            if row['parent_build'] != previous['parent_build']:
+                stale_builds.add(row['parent_build'])
+            if row['commit_build'] != previous['commit_build']:
+                stale_builds.add(row['commit_build'])
+                stale_artifacts.add(row['artifact'])
+        else:
+            previous = row
+            new = [row[k] for k in keep]
+            new.append(percent_change(row['parent_size'], row['commit_size']))
+            rows.append(new)
+            artifacts.add(row['artifact'])
+            builds.add(row['commit_build'])
+
+    db.delete_stale_builds(stale_builds)
+    db.delete_stale_artifacts(stale_artifacts)
+
+    df = pd.DataFrame(rows,
+                      columns=('platform', 'target', 'config', 'section',
+                               parent[:8], commit[:8], 'change', '% change'))
+    df.attrs = {
+        'name': f'{pr},{commit},{parent}',
+        'title': (f'PR #{pr}: ' if pr else '') +
+                 f'Size comparison from {commit} to {parent}',
+        'things': things,
+        'builds': builds,
+        'artifacts': artifacts,
+        'pr': pr,
+        'commit': commit,
+        'parent': parent,
+    }
+    return df
+
+
+def gh_send_change_report(db: SizeDatabase, df: pd.DataFrame,
+                          tdf: pd.DataFrame) -> bool:
+    """Send a change report as a github comment."""
+    if not db.gh:
+        return False
+    pr = df.attrs['pr']
+    title = df.attrs['title']
+    existing_comment_id = 0
+    for comment in gh_get_comments_for_pr(db.gh, pr):
+        if comment.body.partition('\n')[0] == df.attrs['title']:
+            existing_comment_id = comment.id
+            title = comment.body
+            break
+
+    md = io.StringIO()
+    md.write(title)
+    md.write('\n')
+
+    if tdf is not None and not tdf.empty:
+        md.write(f'\n**{tdf.attrs["title"]}:**\n\n')
+        memdf.report.write_df(db.config,
+                              tdf,
+                              md,
+                              'pipe',
+                              hierify=True,
+                              title=False,
+                              tabulate={'floatfmt': '5.1f'})
+
+    count = len(df.attrs['things'])
+    summary = f'{count} build{"" if count == 1 else "s"}'
+    md.write(f'\n<details>\n<summary>{summary}</summary>\n\n')
+    memdf.report.write_df(db.config,
+                          df,
+                          md,
+                          'pipe',
+                          hierify=True,
+                          title=False,
+                          tabulate={'floatfmt': '5.1f'})
+    md.write('\n</details>\n')
+    text = md.getvalue()
+    md.close()
+
+    try:
+        if existing_comment_id:
+            db.gh.issues.update_comment(existing_comment_id, text)
+        else:
+            db.gh.issues.create_comment(pr, text)
+        return True
+    except Exception:
+        return False
+
+
+def report_matching_commits(db: SizeDatabase) -> Dict[str, pd.DataFrame]:
+    """Report on all new comparable commits."""
+    if not (db.config['report.pr'] or db.config['report.push']):
+        return {}
+    dfs = {}
+    for pr, commit, parent in db.select_matching_commits().fetchall():
+        if not db.config['report.pr' if pr else 'report.push']:
+            continue
+        df = changes_for_commit(db, pr, commit, parent)
+        dfs[df.attrs['name']] = df
+
+        if threshold := db.config['report.increases']:
+            tdf = df[df['% change'] > threshold]
+        else:
+            tdf = None
+        if tdf is not None and not tdf.empty:
+            commit = df.attrs['commit']
+            parent = df.attrs['parent']
+            tdf.attrs['name'] = f'L,{commit},{parent}'
+            tdf.attrs['title'] = (
+                f'Increases above {threshold:.1f}% from {commit} to {parent}')
+            dfs[tdf.attrs['name']] = tdf
+
+        if pr and db.config['github.comment']:
+            if gh_send_change_report(db, df, tdf):
+                # Mark the originating builds, and remove the originating
+                # artifacts, so that they don't generate duplicate report
+                # comments.
+                db.set_commented(df.attrs['builds'])
+                if not db.config['github.keep']:
+                    for artifact_id in df.attrs['artifacts']:
+                        logging.info('Deleting artifact %d', artifact_id)
+                        db.delete_artifact(artifact_id)
+    return dfs
+
+
+def report_queries(db: SizeDatabase) -> Dict[str, pd.DataFrame]:
+    """Perform any requested SQL queries."""
+    dfs = {}
+    q = 0
+    for query in db.config['report.query']:
+        q += 1
+        cur = db.execute(query)
+        columns = [i[0] for i in cur.description]
+        rows = cur.fetchall()
+        if rows:
+            df = pd.DataFrame(rows, columns=columns)
+            df.attrs = {'name': f'query{q}', 'title': query}
+            dfs[df.attrs['name']] = df
+    db.commit()
+    return dfs
+
+
+def main(argv):
+    status = 0
+    try:
+        config = Config().init({
+            **memdf.util.config.CONFIG,
+            **memdf.util.sqlite.CONFIG,
+            **memdf.report.OUTPUT_CONFIG,
+            **GITHUB_CONFIG,
+        })
+        config.argparse.add_argument('inputs', metavar='FILE', nargs='*')
+        config.parse(argv)
+
+        dfs = {}
+        with SizeDatabase(config) as db:
+            db.read_inputs()
+            dfs.update(report_matching_commits(db))
+            dfs.update(report_queries(db))
+
+        memdf.report.write_dfs(config,
+                               dfs,
+                               hierify=True,
+                               title=True,
+                               tabulate={'floatfmt': '5.1f'})
+
+    except Exception as exception:
+        status = 1
+        raise exception
+
+    return status
+
+
+if __name__ == '__main__':
+    sys.exit(main(sys.argv))
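gh_sizes.py, which follows, produces the JSON that add_sizes_from_json above ingests; the hand-off is just a reshaping of the documented 'frames' payload into the name/size rows stored in the size table. A standalone sketch of that step, using the section names and sizes from the gh_sizes.py documentation example:

    import json

    report = json.loads('''
        {"platform": "efr32", "config": "BRD4161A", "target": "lock-app",
         "by": "section",
         "frames": {"section": [{"section": ".bss",  "size": 260496},
                                {"section": ".text", "size": 740236}]}}
    ''')
    by = report.get('by', 'section')
    sizes = [{'name': f[by], 'size': f['size']} for f in report['frames'][by]]
    assert sizes[0] == {'name': '.bss', 'size': 260496}
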
+# +""" +This is similar to scripts/tools/memory/report_summary.py, but generates +a specific output format with a simplified interface for use in github +workflows. + +Usage: gh_sizes.py ‹platform› ‹config› ‹target› ‹binary› [‹output›] [‹option›…] + ‹platform› - Platform name, corresponding to a config file + in scripts/tools/memory/platform/ + ‹config› - Configuration identification string. + ‹target› - Build artifact identification string. + ‹binary› - Binary build artifact. + ‹output› - Output name or directory. + ‹option›… - Other options as for report_summary. + +This script also expects certain environment variables, which can be set in a +github workflow as follows: + + env: + BUILD_TYPE: nrfconnect + GH_EVENT_PR: ${{ github.event_name == 'pull_request' && github.event.number || 0 }} + GH_EVENT_HASH: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + GH_EVENT_PARENT: ${{ github.event_name == 'pull_request' && github.event.pull_request.base.sha || github.event.before }} + +Default output file is {platform}-{configname}-{buildname}-sizes.json in the +binary's directory. This file has the form: + + { + "platform": "‹platform›", + "config": "‹config›", + "target": "‹target›", + "time": 1317645296, + "input": "‹binary›", + "event": "pull_request", + "hash": "496620796f752063616e20726561642074686973", + "parent": "20796f752061726520746f6f20636c6f73652e0a", + "pr": 12345, + "by": "section", + "frames": { + "section": [ + {"section": ".bss", "size": 260496}, + {"section": ".data", "size": 1648}, + {"section": ".text", "size": 740236} + ] + } + } + +""" + +import datetime +import logging +import os +import pathlib +import sys + +import memdf.collect +import memdf.report +import memdf.select +import memdf.util + +from memdf import Config, ConfigDescription, DFs, SectionDF + +PLATFORM_CONFIG_DIR = pathlib.Path('scripts/tools/memory/platform') + +CONFIG: ConfigDescription = { + 'event': { + 'help': 'Github workflow event name', + 'metavar': 'NAME', + 'default': os.environ.get('GITHUB_EVENT_NAME'), + }, + 'pr': { + 'help': 'Github PR number', + 'metavar': 'NUMBER', + 'default': int(os.environ.get('GH_EVENT_PR', '0')), + }, + 'hash': { + 'help': 'Current commit hash', + 'metavar': 'HASH', + 'default': os.environ.get('GH_EVENT_HASH'), + }, + 'parent': { + 'help': 'Parent commit hash', + 'metavar': 'HASH', + 'default': os.environ.get('GH_EVENT_PARENT'), + }, + 'timestamp': { + 'help': 'Build timestamp', + 'metavar': 'TIME', + 'default': int(datetime.datetime.now().timestamp()), + }, +} + + +def main(argv): + status = 0 + + try: + _, platform, config_name, target_name, binary, *args = argv + except ValueError: + program = pathlib.Path(argv[0]) + logging.error( + """ + Usage: %s platform config target binary [output] [options] + + This is intended for use in github workflows. 
+
+import datetime
+import logging
+import os
+import pathlib
+import sys
+
+import memdf.collect
+import memdf.report
+import memdf.select
+import memdf.util.config
+
+from memdf import Config, ConfigDescription, DFs, SectionDF
+
+PLATFORM_CONFIG_DIR = pathlib.Path('scripts/tools/memory/platform')
+
+CONFIG: ConfigDescription = {
+    'event': {
+        'help': 'Github workflow event name',
+        'metavar': 'NAME',
+        'default': os.environ.get('GITHUB_EVENT_NAME'),
+    },
+    'pr': {
+        'help': 'Github PR number',
+        'metavar': 'NUMBER',
+        'default': int(os.environ.get('GH_EVENT_PR', '0')),
+    },
+    'hash': {
+        'help': 'Current commit hash',
+        'metavar': 'HASH',
+        'default': os.environ.get('GH_EVENT_HASH'),
+    },
+    'parent': {
+        'help': 'Parent commit hash',
+        'metavar': 'HASH',
+        'default': os.environ.get('GH_EVENT_PARENT'),
+    },
+    'timestamp': {
+        'help': 'Build timestamp',
+        'metavar': 'TIME',
+        'default': int(datetime.datetime.now().timestamp()),
+    },
+}
+
+
+def main(argv):
+    status = 0
+
+    try:
+        _, platform, config_name, target_name, binary, *args = argv
+    except ValueError:
+        program = pathlib.Path(argv[0])
+        logging.error(
+            """
+            Usage: %s platform config target binary [output] [options]
+
+            This is intended for use in github workflows.
+            For other purposes, a general program for the same operations is
+            %s/report_summary.py
+
+            """, program.name, program.parent)
+        return 1
+
+    try:
+        config_file = pathlib.Path(platform)
+        if config_file.is_file():
+            platform = config_file.stem
+        else:
+            config_file = (PLATFORM_CONFIG_DIR / platform).with_suffix('.cfg')
+
+        output_base = f'{platform}-{config_name}-{target_name}-sizes.json'
+        if args and not args[0].startswith('-'):
+            out, *args = args
+            output = pathlib.Path(out)
+            if out.endswith('/') and not output.exists():
+                output.mkdir(parents=True)
+            if output.is_dir():
+                output = output / output_base
+        else:
+            output = pathlib.Path(binary).parent / output_base
+
+        config = Config().init({
+            **memdf.util.config.CONFIG,
+            **memdf.collect.CONFIG,
+            **memdf.select.CONFIG,
+            **memdf.report.OUTPUT_CONFIG,
+            **CONFIG,
+        })
+        config.put('output.file', output)
+        config.put('output.format', 'json_records')
+        if config_file.is_file():
+            config.read_config_file(config_file)
+        else:
+            logging.warning('Missing config file: %s', config_file)
+        config.parse([argv[0]] + args)
+
+        config.put('output.metadata.platform', platform)
+        config.put('output.metadata.config', config_name)
+        config.put('output.metadata.target', target_name)
+        config.put('output.metadata.time', config['timestamp'])
+        config.put('output.metadata.input', binary)
+        config.put('output.metadata.by', 'section')
+        for key in ['event', 'hash', 'parent', 'pr']:
+            if value := config[key]:
+                config.putl(['output', 'metadata', key], value)
+
+        collected: DFs = memdf.collect.collect_files(config, [binary])
+
+        sections = collected[SectionDF.name]
+        section_summary = sections[['section',
+                                    'size']].sort_values(by='section')
+        section_summary.attrs['name'] = "section"
+
+        summaries = {
+            'section': section_summary,
+        }
+
+        # Write configured (json) report to the output file.
+        memdf.report.write_dfs(config, summaries)
+
+        # Write text report to stdout.
+        memdf.report.write_dfs(config, summaries, sys.stdout, 'simple')
+
+    except Exception as exception:
+        status = 1
+        raise exception
+
+    return status
+
+
+if __name__ == '__main__':
+    sys.exit(main(sys.argv))
diff --git a/scripts/tools/memory/memdf/collect.py b/scripts/tools/memory/memdf/collect.py
index b521d1183aef9a..b04c7b87d2b63c 100644
--- a/scripts/tools/memory/memdf/collect.py
+++ b/scripts/tools/memory/memdf/collect.py
@@ -284,6 +284,12 @@ def postprocess_collected(config: Config, dfs: DFs) -> None:
             dfs[c.name] = memdf.select.select_configured_column(
                 config, dfs[c.name], column)
 
+    for df in dfs.values():
+        if demangle := set((c for c in df.columns if c.endswith('symbol'))):
+            df.attrs['demangle'] = demangle
+        if hexify := set((c for c in df.columns if c.endswith('address'))):
+            df.attrs['hexify'] = hexify
+
 
 FileReader = Callable[[Config, str, str], DFs]
 
diff --git a/scripts/tools/memory/memdf/collector/elftools.py b/scripts/tools/memory/memdf/collector/elftools.py
index 3219a4c11ac73d..84f4e16ba7ab19 100644
--- a/scripts/tools/memory/memdf/collector/elftools.py
+++ b/scripts/tools/memory/memdf/collector/elftools.py
@@ -37,7 +37,7 @@ def read_segments(config: Config, ef: ELFFile) -> SegmentDF:
     rows = []
     for segment in ef.iter_segments():
         rows.append([
-            elftools.elf.descriptions.describe_p_type(segment['p_type']),
+            segment['p_type'],
             segment['p_vaddr'], segment['p_paddr'], segment['p_memsz'],
             segment['p_flags']
         ])
diff --git a/scripts/tools/memory/memdf/report.py b/scripts/tools/memory/memdf/report.py
index 5bf5246293d27e..0cd5cfe30b45fa 100644
--- a/scripts/tools/memory/memdf/report.py
+++ b/scripts/tools/memory/memdf/report.py
@@ -17,10 +17,12 @@
 
 import contextlib
 import io
+import json
 import pathlib
 import sys
 
-from typing import Callable, Dict, IO, Optional, Union
+from typing import (Any, Callable, Dict, List, Mapping, IO, Optional, Protocol,
+                    Sequence, Tuple, Union)
 
 import cxxfilt  # type: ignore
 import pandas as pd  # type: ignore
@@ -96,6 +98,38 @@ def demangle(symbol: str):
     return symbol
 
 
+def hierify_rows(table: Sequence[Sequence[Any]]) -> List[List[Any]]:
+    if not table:
+        return table
+    persist = None
+    rows = []
+    for row in table:
+        if persist is None:
+            persist = [None] * len(row)
+        new_persist = []
+        new_row = []
+        changed = False
+        for old, new in zip(persist, list(row)):
+            if not changed and isinstance(new, str) and new == old:
+                new_row.append('')
+                new_persist.append(old)
+            else:
+                changed = True
+                new_row.append(new)
+                new_persist.append(new)
+        rows.append(new_row)
+        persist = new_persist
+    return rows
+
+
+def hierify(df: pd.DataFrame) -> pd.DataFrame:
+    columns = list(df.columns)
+    rows = hierify_rows(df.itertuples(index=False))
+    r = pd.DataFrame(rows, columns=columns)
+    r.attrs = df.attrs
+    return r
+
+
 # Output
 
 OUTPUT_FILE_CONFIG: ConfigDescription = {
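The hierify helpers above blank out repeated leading values so that grouped tables read hierarchically in markdown output. For example, assuming hierify_rows from the hunk above is in scope:

    rows = [['linux', 'debug', '.bss'],
            ['linux', 'debug', '.text'],
            ['linux', 'debug+rpc', '.text']]
    assert hierify_rows(rows) == [['linux', 'debug', '.bss'],
                                  ['', '', '.text'],
                                  ['', 'debug+rpc', '.text']]
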
@@ -112,38 +146,19 @@
     },
 }
 
-OUTPUT_FORMAT_CONFIG: ConfigDescription = {
-    Config.group_def('output'): {
-        'title': 'output options',
-    },
-    'output.format': {
-        'help':
-            'Output format',
-        'metavar':
-            'FORMAT',
-        'default':
-            'text',
-        'choices': [
-            'text',
-            'csv',
-            'tsv',
-            'json_split',
-            'json_records',
-            'json_index',
-            'json_columns',
-            'json_values',
-            'json_table',
-        ],
-        'argparse': {
-            'alias': ['--to', '-t'],
-        },
-    },
-}
 
-OUTPUT_CONFIG: ConfigDescription = {
-    **OUTPUT_FILE_CONFIG,
-    **OUTPUT_FORMAT_CONFIG,
-}
+
+def postprocess_output_metadata(config: Config, key: str) -> None:
+    """For --output-metadata=KEY:VALUE list, convert to dictionary."""
+    assert key == 'output.metadata'
+    metadata = {}
+    for s in config.get(key):
+        if ':' in s:
+            k, v = s.split(':', 1)
+        else:
+            k, v = s, True
+        metadata[k] = v
+    config.put(key, metadata)
+
 
 OutputOption = Union[IO, str, None]
 
@@ -158,7 +173,8 @@ def open_output(config: Config,
     if isinstance(output, str):
         filename = output
     else:
-        if not (filename := config['output.file']):
+        filename = config['output.file']
+        if (not filename) or (filename == '-'):
             yield sys.stdout
             return
     if suffix:
@@ -168,22 +184,21 @@
     f.close()
 
 
-def write_table(config: Config, df: DF, output: IO) -> None:
+# Single-table writers.
+
+def write_nothing(config: Config, df: DF, output: IO, **_kwargs) -> None:
+    pass
+
+
+def write_text(config: Config, df: DF, output: IO, **_kwargs) -> None:
     """Write a memory usage data frame as a human-readable table."""
     memdf.util.pretty.debug(df)
     if df.shape[0]:
         df = df.copy()
-        if 'symbol' in df.columns and config['report.demangle']:
-            df['symbol'] = df['symbol'].apply(demangle)
         last_column_is_left_justified = False
         formatters = []
         for column in df.columns:
-            if column.endswith('address'):
-                # Hex format address.
-                width = (int(df[column].max()).bit_length() + 3) // 4
-                formatters.append(lambda x:
-                                  '{0:0{width}X}'.format(x, width=width))
-            elif pd.api.types.is_string_dtype(df.dtypes[column]):
+            if pd.api.types.is_string_dtype(df.dtypes[column]):
                 df[column] = df[column].astype(str)
                 # Left justify strings.
                 width = max(len(column), df[column].str.len().max())
@@ -204,35 +219,49 @@
         print(' '.join(df.columns))
 
 
-def write_text(config: Config, frames: DFs, output: OutputOption,
-               method: str) -> None:
-    """Write a group of memory usage data frames as human-readable text."""
-    with open_output(config, output) as out:
-        sep = ''
-        for df in frames.values():
-            print(end=sep, file=out)
-            sep = '\n'
-            write_table(config, df, out)
+def write_json(_config: Config, df: DF, output: IO, **kwargs) -> None:
+    """Write a memory usage data frame as json."""
+    orient = kwargs.get('method', 'records')
+    # .removeprefix('json_') in 3.9
+    if orient.startswith('json_'):
+        orient = orient[5:]
+    df.to_json(output, orient=orient)
 
 
-def write_json(config: Config, frames: DFs, output: OutputOption,
-               method: str) -> None:
-    """Write a group of memory usage data frames as json."""
-    orient = method[5:]
-    with open_output(config, output) as out:
-        sep = '['
-        for df in frames.values():
-            print(sep, file=out)
-            sep = ','
-            df.to_json(out, orient=orient)
-        print(']', file=out)
+def write_csv(_config: Config, df: DF, output: IO, **kwargs) -> None:
+    """Write a memory usage data frame in csv or tsv form."""
+    kinds = {'csv': ',', 'tsv': '\t'}
+    method = kwargs.get('method', 'csv')
+    delimiter = kwargs.get('delimiter', kinds.get(method, method))
+    df.to_csv(output, index=False, sep=delimiter)
+
+
+def write_markdown(_config: Config, df: DF, output: IO, **kwargs) -> None:
+    """Write a memory usage data frame as markdown."""
+    args = {k: kwargs[k] for k in ('index',) if k in kwargs}
+    if 'tabulate' in kwargs:
+        args.update(kwargs['tabulate'])
+    if 'tablefmt' not in args:
+        args['tablefmt'] = kwargs.get('method', 'pipe')
+    args.setdefault('index', False)
+    df.to_markdown(output, **args)
+    print(file=output)
+
+
+# Multi-table writers.
+ +class DFsWriter(Protocol): + """Type checking for multiple table writers.""" + + def __call__(self, config: Config, dfs: DFs, output: OutputOption, + writer: Callable, **kwargs) -> None: + pass dfname_count = 0 -def dfname(df: DF) -> str: - """Get a name for a data frame; used when writing separate csv files.""" +def dfname(df: DF, k: str = 'unknown') -> str: + """Get a name for a data frame.""" try: return df.name except AttributeError: @@ -240,52 +269,238 @@ def dfname(df: DF) -> str: return c.name global dfname_count dfname_count += 1 - return 'unknown' + str(dfname_count) + return k + str(dfname_count) -def write_csv(config: Config, - frames: DFs, - output: OutputOption, - method: str = 'csv') -> None: - """Write a group of memory usage data frames in csv or tsv form. +def write_one(config: Config, frames: DFs, output: OutputOption, + writer: Callable, **kw) -> None: + """Write a group of memory usage data frames to a single file.""" + with open_output(config, output) as out: + sep = '' + for df in frames.values(): + print(end=sep, file=out) + if kw.get('title') and 'titlefmt' in kw and 'title' in df.attrs: + print(kw['titlefmt'].format(df.attrs['title']), file=out) + sep = '\n' + writer(config, df, out, **kw) - When writing to files, a separate file is written for each table, - using the supplied file name as a prefix. - """ - kinds = {'csv': ',', 'tsv': '\t'} - delimiter = kinds.get(method, method) - if isinstance(output, str) and (extension := pathlib.Path(output).suffix): - pass - elif method in kinds: - extension = '.' + method - else: - extension = '.csv' + +def write_many(config: Config, frames: DFs, output: OutputOption, + writer: Callable, **kwargs) -> None: + """Write a group of memory usage data frames to multiple files.""" + if (suffix := kwargs.get('suffix')) is None: + if isinstance(output, str) and (suffix := pathlib.Path(output).suffix): + pass + elif 'method' in kwargs: + suffix = '.' 
+ kwargs['method'] + else: + suffix = '' for df in frames.values(): name = dfname(df) - with open_output(config, output, f'-{name}{extension}') as out: - df.to_csv(out, index=False, sep=delimiter) - - -FileWriter = Callable[[Config, DFs, OutputOption, str], None] - -FILE_WRITERS: Dict[str, FileWriter] = { - 'text': write_text, - 'json_split': write_json, - 'json_records': write_json, - 'json_index': write_json, - 'json_columns': write_json, - 'json_values': write_json, - 'json_table': write_json, - 'csv': write_csv, - 'tsv': write_csv, + with open_output(config, output, f'-{name}{suffix}') as out: + writer(config, df, out, **kwargs) + + +def write_jsons(config: Config, frames: DFs, output: OutputOption, + writer: Callable, **kwargs) -> None: + """Write a group of memory usage data frames as a json dictionary.""" + with open_output(config, output) as out: + print('{', file=out) + if metadata := config['output.metadata']: + for k, v in metadata.items(): + print(f' {json.dumps(k)}: {json.dumps(v)},', file=out) + print(' "frames": ', file=out, end='') + sep = '{' + for df in frames.values(): + name = df.attrs.get('name', df.attrs.get('title', dfname(df))) + print(sep, file=out) + sep = ',' + print(f' {json.dumps(name)}: ', file=out, end='') + writer(config, df, out, indent=6, **kwargs) + print('}}', file=out) + + +def write_none(_config: Config, _frames: DFs, _output: OutputOption, + _writer: Callable, **_kwargs) -> None: + pass + + +def kwgetset(k: str, *args): + r = set() + for i in args: + r |= set(i.get(k, set())) + return r + + +def prep(config: Config, df: pd.DataFrame, kw: Dict) -> pd.DataFrame: + """Preprocess a table for output.""" + def each_column(k: str): + for column in set(df.attrs.get(k, set()) | kw.get(k, set())): + if column in df.columns: + yield column + + def maybe_copy(copied, df): + return (True, df if copied else df.copy()) + + copied = False + + if config['report.demangle']: + for column in each_column('demangle'): + copied, df = maybe_copy(copied, df) + df[column] = df[column].apply(demangle) + + for column in each_column('hexify'): + copied, df = maybe_copy(copied, df) + width = (int(df[column].max()).bit_length() + 3) // 4 + df[column] = df[column].apply( + lambda x: '{0:0{width}X}'.format(x, width=width)) + + if kw.get('hierify'): + df = hierify(df) + + return df + + +class Writer: + def __init__(self, + group: Callable, + single: Callable, + defaults: Optional[Dict] = None, + overrides: Optional[Dict] = None): + self.group = group + self.single = single + self.defaults = defaults or {} + self.overrides = overrides or {} + + def write_df(self, + config: Config, + frame: pd.DataFrame, + output: OutputOption = None, + **kwargs) -> None: + args = self._args(kwargs) + with open_output(config, output) as out: + self.single(config, prep(config, frame, args), out, **args) + + def write_dfs(self, + config: Config, + frames: DFs, + output: OutputOption = None, + **kwargs) -> None: + """Write a group of memory usage data frames.""" + args = self._args(kwargs) + frames = {k: prep(config, df, args) for k, df in frames.items()} + self.group(config, frames, output, self.single, **args) + + def _args(self, kw: Mapping) -> Dict: + r = self.defaults.copy() + r.update(kw) + r.update(self.overrides) + return r + + +class MarkdownWriter(Writer): + def __init__(self, + defaults: Optional[Dict] = None, + overrides: Optional[Dict] = None): + super().__init__(write_one, write_markdown, defaults, overrides) + + +class JsonWriter(Writer): + def __init__(self, + defaults: Optional[Dict] = 
None, + overrides: Optional[Dict] = None): + super().__init__(write_jsons, write_json, defaults, overrides) + self.overrides['hierify'] = False + + +class CsvWriter(Writer): + def __init__(self, + defaults: Optional[Dict] = None, + overrides: Optional[Dict] = None): + super().__init__(write_many, write_csv, defaults, overrides) + self.overrides['hierify'] = False + + +WRITERS: Dict[str, Writer] = { + 'none': Writer(write_none, write_nothing), + 'text': Writer(write_one, write_text, {'titlefmt': '\n{}\n'}), + 'json_split': JsonWriter(), + 'json_records': JsonWriter(), + 'json_index': JsonWriter(), + 'json_columns': JsonWriter(), + 'json_values': JsonWriter(), + 'json_table': JsonWriter(), + 'csv': CsvWriter({'delimiter': ','}), + 'tsv': CsvWriter({'delimiter': '\t'}), + 'plain': MarkdownWriter({'titlefmt': '\n{}\n'}), + 'simple': MarkdownWriter({'titlefmt': '\n{}\n'}), + 'grid': MarkdownWriter({'titlefmt': '\n\n'}), + 'fancy_grid': MarkdownWriter({'titlefmt': '\n\n'}), + 'html': MarkdownWriter({'titlefmt': '
<h2>{}</h2>'}), +    'unsafehtml': MarkdownWriter({'titlefmt': '<h2>{}</h2>
'}), + 'github': MarkdownWriter(), + 'pipe': MarkdownWriter(), + 'orgtbl': MarkdownWriter(), + 'jira': MarkdownWriter(), + 'presto': MarkdownWriter(), + 'pretty': MarkdownWriter(), + 'psql': MarkdownWriter(), + 'rst': MarkdownWriter(), + 'mediawiki': MarkdownWriter(), + 'moinmoin': MarkdownWriter(), + 'youtrack': MarkdownWriter(), + 'latex': MarkdownWriter(), + 'latex_raw': MarkdownWriter(), + 'latex_booktabs': MarkdownWriter(), + 'latex_longtable': MarkdownWriter(), + 'textile': MarkdownWriter(), +} + +OUTPUT_FORMAT_CONFIG: ConfigDescription = { + Config.group_def('output'): { + 'title': 'output options', + }, + 'output.format': { + 'help': 'Output format', + 'metavar': 'FORMAT', + 'default': 'simple', + 'choices': list(WRITERS.keys()), + 'argparse': { + 'alias': ['--to', '-t'], + }, + }, + 'output.metadata': { + 'help': 'Metadata for JSON', + 'metavar': 'NAME:VALUE', + 'default': [], + 'argparse': { + 'alias': ['--metadata'] + }, + 'postprocess': postprocess_output_metadata, + } +} + +OUTPUT_CONFIG: ConfigDescription = { + **OUTPUT_FILE_CONFIG, + **OUTPUT_FORMAT_CONFIG, } def write_dfs(config: Config, frames: DFs, output: OutputOption = None, - method: Optional[str] = None) -> None: + method: Optional[str] = None, + **kwargs) -> None: """Write a group of memory usage data frames.""" - if method is None: - method = config['output.format'] - FILE_WRITERS[method](config, frames, output, method) + kwargs['method'] = method or config['output.format'] + WRITERS[kwargs['method']].write_dfs(config, frames, output, **kwargs) + + +def write_df(config: Config, + frame: DF, + output: OutputOption = None, + method: Optional[str] = None, + **kwargs) -> None: + """Write a memory usage data frame.""" + kwargs['method'] = method or config['output.format'] + WRITERS[kwargs['method']].write_df(config, frame, output, **kwargs) diff --git a/scripts/tools/memory/memdf/select.py b/scripts/tools/memory/memdf/select.py index f14d58bae1a5bb..77a3d3f05b4d1e 100644 --- a/scripts/tools/memory/memdf/select.py +++ b/scripts/tools/memory/memdf/select.py @@ -15,12 +15,16 @@ # """Data frame selection utilities.""" +import numpy as np # type: ignore + import memdf.name import memdf.util.pretty import memdf.util.config from memdf import Config, ConfigDescription, DF +from typing import Optional + def split_size(config: Config, key: str) -> None: """Split a name:size configuration value. 
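Note how the two report.py additions above compose: postprocess_output_metadata() turns each --output-metadata NAME:VALUE argument into a dictionary entry (a bare NAME maps to True), and write_jsons() then emits those entries as top-level keys of a JSON envelope, ahead of a "frames" object keyed by table name. Assuming a single symbol table and illustrative values, json_records output would look roughly like:

    {
      "pr": "1234",
      "frames": {
        "symbol": [{"section": ".text", "size": 12345}]
      }
    }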
@@ -128,14 +132,18 @@ def synthesize_region(config: Config, df: DF, column: str) -> DF: return df +def groupby_region(df: DF): + return df[(df['size'] > 0) | (df['region'] != memdf.name.UNKNOWN)] + + SYNTHESIZE = { - 'region': synthesize_region, + 'region': (synthesize_region, groupby_region), } def synthesize_column(config: Config, df: DF, column: str) -> DF: if column not in df.columns: - SYNTHESIZE[column](config, df, column) + SYNTHESIZE[column][0](config, df, column) return df @@ -152,3 +160,12 @@ def select_configured(config: Config, df: DF, columns=SELECTION_CHOICES) -> DF: for column in columns: df = select_configured_column(config, df, column) return df + + +def groupby(config: Config, df: DF, by: Optional[str] = None): + if not by: + by = config['report.by'] + df = df[[by, 'size']].groupby(by).aggregate(np.sum).reset_index() + if by in SYNTHESIZE: + df = SYNTHESIZE[by][1](df) + return df diff --git a/scripts/tools/memory/memdf/util/nd.py b/scripts/tools/memory/memdf/util/nd.py index 33010aa7cad8bf..3cfb99e62165eb 100644 --- a/scripts/tools/memory/memdf/util/nd.py +++ b/scripts/tools/memory/memdf/util/nd.py @@ -15,9 +15,9 @@ # """Nested dictionary utilities.""" -from typing import (Any, Mapping, MutableMapping, Optional) +from typing import Any, Mapping, MutableMapping, Optional, Sequence -Key = list +Key = Sequence def get(nd: Optional[Mapping], keys: Key, default: Any = None) -> Any: @@ -46,10 +46,25 @@ def put(nd: MutableMapping, keys: Key, value: Any) -> None: nd[key] = value +def store(nd: MutableMapping, keys: Key, value: Any, empty: Any, add) -> None: + """Store a value in a nested dictionary where the leaves are containers.""" + while True: + key = keys[0] + keys = keys[1:] + if not keys: + break + if key not in nd: + nd[key] = {} + nd = nd[key] + if key not in nd: + nd[key] = empty + add(nd[key], value) + + def update(nd: MutableMapping, src: Mapping) -> None: """Update a nested dictionary.""" for k, v in src.items(): - if k not in nd: + if k not in nd or nd[k] is None: nd[k] = v elif isinstance(nd[k], dict) and isinstance(v, dict): update(nd[k], v) @@ -61,4 +76,4 @@ def update(nd: MutableMapping, src: Mapping) -> None: elif type(nd[k]) == type(v): nd[k] = v else: - raise TypeError("type mismatch") + raise TypeError(f"type mismatch {k},{v} was {nd[k]}") diff --git a/scripts/tools/memory/memdf/util/sqlite.py b/scripts/tools/memory/memdf/util/sqlite.py new file mode 100644 index 00000000000000..eba40f1818de8c --- /dev/null +++ b/scripts/tools/memory/memdf/util/sqlite.py @@ -0,0 +1,110 @@ +#!/usr/bin/env python3 +# +# Copyright (c) 2021 Project CHIP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +"""Wrapper and utility functions around sqlite3""" + +import sqlite3 + +from typing import List, Optional + +from memdf import Config, ConfigDescription + +CONFIG: ConfigDescription = { + Config.group_def('database'): { + 'title': 'database options', + }, + 'database.file': { + 'help': 'Sqlite3 file', + 'metavar': 'FILENAME', + 'default': ':memory:', + 'argparse': { + 'alias': ['--db'], + }, + }, +} + + +class Database: + """Wrapper and utility functions around sqlite3""" + on_open: Optional[List[str]] = None + on_writable: Optional[List[str]] = None + + def __init__(self, filename: str, writable: bool = True): + self.filename = filename + self.writable = writable + self.con: Optional[sqlite3.Connection] = None + + def __enter__(self): + return self.open() + + def __exit__(self, et, ev, traceback): + self.close() + return False + + def open(self): + """Open and initialize the database connection.""" + if not self.con: + db = 'file:' + self.filename + if not self.writable: + db += '?mode=ro' + self.con = sqlite3.connect(db, uri=True) + if self.on_open: + for i in self.on_open: + self.con.execute(i) + if self.writable and self.on_writable: + for i in self.on_writable: + self.con.execute(i) + return self + + def close(self): + if self.con: + self.con.close() + self.con = None + return self + + def connection(self) -> sqlite3.Connection: + assert self.con + return self.con + + def execute(self, query, parameters=None): + if parameters: + return self.con.execute(query, parameters) + return self.con.execute(query) + + def commit(self): + self.con.commit() + return self + + def store(self, table: str, **kwargs): + """Insert the data if it does not already exist.""" + q = (f"INSERT INTO {table} ({','.join(kwargs.keys())})" + f" VALUES ({','.join('?' * len(kwargs))})" + f" ON CONFLICT DO NOTHING") + v = list(kwargs.values()) + self.connection().execute(q, v) + + def get_matching(self, table: str, columns: List[str], **kwargs): + return self.connection().execute( + f"SELECT {','.join(columns)} FROM {table}" + f" WHERE {'=? AND '.join(kwargs.keys())}=?", + list(kwargs.values())) + + def get_matching_id(self, table: str, **kwargs): + return self.get_matching(table, ['id'], **kwargs).fetchone()[0] + + def store_and_return_id(self, table: str, **kwargs) -> int: + self.store(table, **kwargs) + return self.get_matching_id(table, **kwargs) diff --git a/scripts/tools/memory/platform/esp32.cfg b/scripts/tools/memory/platform/esp32.cfg index 3b140fe93ac55a..650af9c33a5bfb 100644 --- a/scripts/tools/memory/platform/esp32.cfg +++ b/scripts/tools/memory/platform/esp32.cfg @@ -18,7 +18,7 @@ 'section': { # By default, only these sections will be included # when operating by sections. - 'default': ['.flash.text', '.flash.rodata', '.dram0.bss', '.dram0'.data', '.iram0.text'] + 'default': ['.flash.text', '.flash.rodata', '.dram0.bss', '.dram0.data', '.iram0.text'] }, 'symbol': { 'free': { diff --git a/scripts/tools/memory/platform/linux.cfg b/scripts/tools/memory/platform/linux.cfg new file mode 100644 index 00000000000000..2f11c3f4d4db99 --- /dev/null +++ b/scripts/tools/memory/platform/linux.cfg @@ -0,0 +1,77 @@ +# Copyright (c) 2021 Project CHIP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Memory tools default configuration for Linux. + +{ + 'section': { + # By default, only these sections will be included + # when operating by sections. + 'default': [ + '.text', '.data', '.data.rel.ro', '.bss', '.dynamic', '.got', + '.init', '.init_array', '.rodata' + ] + }, +# 'symbol': { +# 'free': { +# # These symbols mark the start or end of areas where memory that +# # does not belong to any symbol is considered unused (rather than +# # a gap that may be in use for some non-symbol purpose, e.g. string +# # constants or alignment). +# 'start': [], +# 'end': [], +# } +# }, + 'region': { + # Regions are sets of sections that can be used for aggregate reports. + 'sections': { + 'FLASH': [ + ".dynstr", + ".dynsym", + ".eh_frame_hdr", + ".eh_frame", + ".fini", + ".gcc_except_table", + ".gnu.version_d", + ".gnu.version_r", + ".gnu.version", + ".hash", + ".init", + ".interp", + ".note.ABI-tag", + ".rodata1", + ".rodata", + ".strtab", + ".symtab", + ".text", + ], + 'RAM': [ + ".bss", + ".ctors", + ".data1", + ".data.rel.ro", + ".data", + ".dtors", + ".dynamic", + ".fini_array", + ".got.plt", + ".init_array", + ".jcr", + ".preinit_array", + ".tbss", + ".tdata", + ] + } + }, +} diff --git a/scripts/tools/memory/platform/mbed.cfg b/scripts/tools/memory/platform/mbed.cfg new file mode 100644 index 00000000000000..f261ef24a6f9d3 --- /dev/null +++ b/scripts/tools/memory/platform/mbed.cfg @@ -0,0 +1,42 @@ +# Copyright (c) 2021 Project CHIP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Memory tools default configuration for Mbed. + +{ + 'section': { + # By default, only these sections will be included + # when operating by sections. + 'default': ['.text', '.data', '.bss', '.heap'], + }, +# 'symbol': { +# 'free': { +# # These symbols mark the start or end of areas where memory that +# # does not belong to any symbol is considered unused (rather than +# # a gap that may be in use for some non-symbol purpose, e.g. string +# # constants or alignment). +# 'start': [], +# 'end': [], +# } +# }, +# 'region': { +# # Regions are sets of sections that can be used for aggregate reports. +# 'sections': { +# 'FLASH': [ +# ], +# 'RAM': [ +# ] +# } +# }, +} diff --git a/scripts/tools/memory/platform/p6.cfg b/scripts/tools/memory/platform/p6.cfg new file mode 100644 index 00000000000000..96990a3cc7b5b6 --- /dev/null +++ b/scripts/tools/memory/platform/p6.cfg @@ -0,0 +1,42 @@ +# Copyright (c) 2021 Project CHIP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Memory tools default configuration for Infineon P6. + +{ + 'section': { + # By default, only these sections will be included + # when operating by sections. + 'default': ['.text', '.data', '.bss', '.heap'], + }, +# 'symbol': { +# 'free': { +# # These symbols mark the start or end of areas where memory that +# # does not belong to any symbol is considered unused (rather than +# # a gap that may be in use for some non-symbol purpose, e.g. string +# # constants or alignment). +# 'start': [], +# 'end': [], +# } +# }, +# 'region': { +# # Regions are sets of sections that can be used for aggregate reports. +# 'sections': { +# 'FLASH': [ +# ], +# 'RAM': [ +# ] +# } +# }, +} diff --git a/scripts/tools/memory/platform/telink.cfg b/scripts/tools/memory/platform/telink.cfg new file mode 100644 index 00000000000000..4db7fcd95fc201 --- /dev/null +++ b/scripts/tools/memory/platform/telink.cfg @@ -0,0 +1,42 @@ +# Copyright (c) 2021 Project CHIP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Memory tools default configuration for Telink. + +{ + 'section': { + # By default, only these sections will be included + # when operating by sections. + 'default': ['bss', 'noinit', 'ram_code', 'rodata', 'text'] + }, +# 'symbol': { +# 'free': { +# # These symbols mark the start or end of areas where memory that +# # does not belong to any symbol is considered unused (rather than +# # a gap that may be in use for some non-symbol purpose, e.g. string +# # constants or alignment). +# 'start': [], +# 'end': [], +# } +# }, +# 'region': { +# # Regions are sets of sections that can be used for aggregate reports. +# 'sections': { +# 'FLASH': [ +# ], +# 'RAM': [ +# ] +# } +# }, +} diff --git a/scripts/tools/memory/report_summary.py b/scripts/tools/memory/report_summary.py index a0f7002e0dfe80..489600c7a1af3f 100755 --- a/scripts/tools/memory/report_summary.py +++ b/scripts/tools/memory/report_summary.py @@ -49,10 +49,8 @@ def main(argv): }, argv) dfs: DFs = memdf.collect.collect_files(config) - by = config['report.by'] symbols = dfs[SymbolDF.name] - summary = symbols[[by, 'size' - ]].groupby(by).aggregate(np.sum).reset_index() + summary = memdf.select.groupby(config, symbols) memdf.report.write_dfs(config, {SymbolDF.name: summary}) except Exception as exception:
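As a closing illustration of the new memdf.util.sqlite wrapper: store() is an insert-if-absent (via ON CONFLICT DO NOTHING), and store_and_return_id() combines it with a lookup so callers can idempotently register a row and fetch its id. A minimal sketch, using a hypothetical table that is not part of this patch:

    from memdf.util.sqlite import Database

    with Database(':memory:') as db:
        # The schema here is illustrative; the wrapper does not create tables.
        db.connection().execute(
            'CREATE TABLE build (id INTEGER PRIMARY KEY, hash TEXT UNIQUE)')
        build_id = db.store_and_return_id('build', hash='abc123')
        db.commit()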