diff --git a/.circleci/config.yml b/.circleci/config.yml
index f1c7a524f..581424b69 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -1,74 +1,55 @@
+# As much as possible, this file should be kept in sync with:
+# https://github.com/napari/napari/blob/main/.circleci/config.yaml
 # Use the latest 2.1 version of CircleCI pipeline process engine.
-# See: https://circleci.com/docs/2.0/configuration-reference
+# See: https://circleci.com/docs/2.1/configuration-reference
 version: 2.1
-
-# Orbs are reusable packages of CircleCI configuration that you may share across projects, enabling you to create encapsulated, parameterized commands, jobs, and executors that can be used across multiple projects.
-# See: https://circleci.com/docs/2.0/orb-intro/
+# Orbs are reusable packages of CircleCI configuration that you may share across projects.
+# See: https://circleci.com/docs/2.1/orb-intro/
 orbs:
-  # The python orb contains a set of prepackaged CircleCI configuration you can use repeatedly in your configuration files
-  # Orb commands and jobs help you with common scripting around a language/tool
-  # so you dont have to copy and paste it everywhere.
-  # See the orb documentation here: https://circleci.com/developer/orbs/orb/circleci/python
   python: circleci/python@1.5.0
-
-# Define a job to be invoked later in a workflow.
-# See: https://circleci.com/docs/2.0/configuration-reference/#jobs
 jobs:
-  build-docs: # This is the name of the job, feel free to change it to better match what you're trying to do!
-    # These next lines defines a Docker executors: https://circleci.com/docs/2.0/executor-types/
-    # You can specify an image from Dockerhub or use one of the convenience images from CircleCI's Developer Hub
-    # A list of available CircleCI Docker convenience images are available here: https://circleci.com/developer/images/image/cimg/python
-    # The executor is the environment in which the steps below will be executed - below will use a python 3.10.2 container
-    # Change the version below to your required version of python
+  build-docs:
     docker:
-      - image: cimg/python:3.10.2
-    # Checkout the code as the first step. This is a dedicated CircleCI step.
-    # The python orb's install-packages step will install the dependencies from a Pipfile via Pipenv by default.
-    # Here we're making sure we use just use the system-wide pip. By default it uses the project root's requirements.txt.
-    # Then run your tests!
-    # CircleCI will report the results back to your VCS provider.
+      # A list of available CircleCI Docker convenience images are available here: https://circleci.com/developer/images/image/cimg/python
+      - image: cimg/python:3.10.13
     steps:
-      - checkout
+      - checkout:
+          path: docs
+      - run:
+          name: Clone main repo into a subdirectory
+          command: git clone git@github.com:napari/napari.git napari
       - run:
          name: Install qt libs + xvfb
          command: sudo apt-get update && sudo apt-get install -y xvfb libegl1 libdbus-1-3 libxkbcommon-x11-0 libxcb-icccm4 libxcb-image0 libxcb-keysyms1 libxcb-randr0 libxcb-render-util0 libxcb-xinerama0 libxcb-xinput0 libxcb-xfixes0 x11-utils
-
      - run:
          name: Setup virtual environment
-          # app-dir: ~/project/package-directory/ # If you're requirements.txt isn't in the root directory.
          command: |
            python -m venv venv
            . venv/bin/activate
            python -m pip install --upgrade pip
-
-      - run:
-          name: Clone main repo
-          command: git clone git@github.com:napari/napari.git napari
      - run:
          name: Install napari-dev
          command: |
            . venv/bin/activate
-            python -m pip install -e napari/".[pyside,dev]" -c "napari/resources/constraints/constraints_py3.10.txt"
-
-      - run:
-          name: Install python dependencies
-          command: |
-            . venv/bin/activate
-            python -m pip install -r requirements.txt
+            python -m pip install -e "napari/[pyside,dev]"
+          environment:
+            PIP_CONSTRAINT: napari/resources/constraints/constraints_py3.10_docs.txt
      - run:
          name: Build docs
          command: |
            . venv/bin/activate
-            xvfb-run --auto-servernum make docs GALLERY_PATH=../napari/examples/
+            cd docs
+            xvfb-run --auto-servernum make docs
+          environment:
+            PIP_CONSTRAINT: ../napari/resources/constraints/constraints_py3.10_docs.txt
      - store_artifacts:
-          path: docs/_build/
+          path: docs/docs/_build/
      - persist_to_workspace:
          root: .
          paths:
-            - docs/_build/
-
-# Invoke jobs via workflows
-# See: https://circleci.com/docs/2.0/configuration-reference/#workflows
+            - docs/docs/_build/
 workflows:
-  build-docs: # This is the name of the workflow, feel free to change it to better match your workflow.
-    # Inside the workflow, you define the jobs you want to run.
+  build-docs:
    jobs:
      - build-docs
diff --git a/.github/workflows/build_docs.yml b/.github/workflows/build_docs.yml
index fd2d62aec..64d8897ae 100644
--- a/.github/workflows/build_docs.yml
+++ b/.github/workflows/build_docs.yml
@@ -8,36 +8,44 @@ on:
       - main
   workflow_dispatch:
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
 jobs:
   build-and-upload:
     name: Build & Upload Artifact
     runs-on: ubuntu-latest
     steps:
       - name: Clone docs repo
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
+        with:
+          path: docs # place in a named directory
       - name: Clone main repo
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           path: napari # place in a named directory
           repository: napari/napari
           # ensure version metadata is proper
           fetch-depth: 0
-      - name: Copy examples to docs folder
-        run: |
-          cp -R napari/examples .
-
       - uses: actions/setup-python@v4
         with:
           python-version: "3.10"
+          cache-dependency-path: |
+            napari/setup.cfg
+            docs/requirements.txt
       - uses: tlambert03/setup-qt-libs@v1
       - name: Install Dependencies
         run: |
           python -m pip install --upgrade pip
-          python -m pip install "napari/[all]" -c "napari/resources/constraints/constraints_py3.10_docs.txt"
+          python -m pip install "napari/[all]"
+          python -m pip install -r docs/requirements.txt
+        env:
+          PIP_CONSTRAINT: ${{ github.workspace }}/napari/resources/constraints/constraints_py3.10_docs.txt
 
       - name: Testing
         run: |
@@ -49,11 +57,13 @@ jobs:
         env:
           GOOGLE_CALENDAR_ID: ${{ secrets.GOOGLE_CALENDAR_ID }}
           GOOGLE_CALENDAR_API_KEY: ${{ secrets.GOOGLE_CALENDAR_API_KEY }}
+          PIP_CONSTRAINT: ${{ github.workspace }}/napari/resources/constraints/constraints_py3.10_docs.txt
         with:
-          run: make docs GALLERY_PATH=../examples/
+          run: make -C docs docs
+
       - name: Upload artifact
         uses: actions/upload-artifact@v3
         with:
           name: docs
-          path: docs/_build
+          path: docs/docs/_build
diff --git a/.github/workflows/circleci.yml b/.github/workflows/circleci.yml
index dbafabb09..8a14cb131 100644
--- a/.github/workflows/circleci.yml
+++ b/.github/workflows/circleci.yml
@@ -19,6 +19,6 @@ jobs:
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
           api-token: ${{ secrets.CIRCLECI_TOKEN }}
-          artifact-path: 0/docs/_build/index.html
+          artifact-path: 0/docs/docs/_build/index.html
           circleci-jobs: build-docs
           job-title: Check the rendered docs here!
diff --git a/.github/workflows/deploy_docs.yml b/.github/workflows/deploy_docs.yml
index 0a9f26fe4..f29711544 100644
--- a/.github/workflows/deploy_docs.yml
+++ b/.github/workflows/deploy_docs.yml
@@ -47,7 +47,8 @@ jobs:
         run: |
           python -m pip install --upgrade pip
           python -m pip install "napari-repo/[all]" -c "napari-repo/resources/constraints/constraints_py3.10_docs.txt"
-
+          python -m pip install -r docs/requirements.txt -c "napari-repo/resources/constraints/constraints_py3.10_docs.txt"
+
       - name: Testing
         run: |
           python -c 'import napari; print(napari.__version__)'
diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml
index f04408a99..002112e07 100644
--- a/.github/workflows/labeler.yml
+++ b/.github/workflows/labeler.yml
@@ -5,8 +5,11 @@ on:
 
 jobs:
   triage:
+    permissions:
+      contents: read
+      pull-requests: write
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/labeler@main
+      - uses: actions/labeler@v4
        with:
          repo-token: "${{ secrets.GITHUB_TOKEN }}"
diff --git a/docs/_scripts/update_preference_docs.py b/docs/_scripts/update_preference_docs.py
index 865a3ca1a..086302320 100644
--- a/docs/_scripts/update_preference_docs.py
+++ b/docs/_scripts/update_preference_docs.py
@@ -1,7 +1,7 @@
 from pathlib import Path
 
 from jinja2 import Template
-from pydantic.main import ModelMetaclass
+from napari._pydantic_compat import ModelMetaclass
 from qtpy.QtCore import QTimer
 from qtpy.QtWidgets import QMessageBox
 
diff --git a/docs/_toc.yml b/docs/_toc.yml
index 1b896d24e..627dff03a 100644
--- a/docs/_toc.yml
+++ b/docs/_toc.yml
@@ -145,6 +145,7 @@ subtrees:
          - file: naps/5-new-logo
          - file: naps/6-contributable-menus
          - file: naps/7-key-binding-dispatch
+          - file: naps/8-telemetry
      - file: developers/documentation/index
        subtrees:
          - entries:
diff --git a/docs/conf.py b/docs/conf.py
index b28d67d23..a14929e2c 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -163,7 +163,7 @@
 myst_heading_anchors = 4
 
 version_string = '.'.join(str(x) for x in __version_tuple__[:3])
-python_version = '3.9'
+python_version = '3.10'
 python_version_range = '3.8–3.10'
 
 myst_substitutions = {
diff --git a/docs/further-resources/napari-workshops.md b/docs/further-resources/napari-workshops.md
index 70575f228..4e4062bc1 100644
--- a/docs/further-resources/napari-workshops.md
+++ b/docs/further-resources/napari-workshops.md
@@ -21,6 +21,10 @@ or contact the core developers on [zulip chat](https://napari.zulipchat.com/logi
 
 ## Workshops
 
 *Workshops are listed from newest to oldest.*
+* September 2023
+  * [Plugin design](https://chanzuckerberg.github.io/napari-plugin-accel-workshops/workshops/design23.html) and [software development workshop](https://chanzuckerberg.github.io/napari-plugin-accel-workshops/workshops/softwaredev.html) for [napari plugin grantees](https://chanzuckerberg.com/rfa/napari-plugin-grants/).
+  * [Bioimage Analysis with Python and Napari (video lecture)](https://www.youtube.com/watch?v=QDS5t7oZH-c) at the [EMBO Practical Course for Advanced Methods in Bioimage Analysis](https://www.embl.org/about/info/course-and-conference-office/events/bia23-01/)
+
 * November 2022, napari foundation grant onboarding
   * [Getting started with napari plugin development slide deck](https://docs.google.com/presentation/d/15lrFRLPm9bfmU4hgcVwoduIJr5bqhoHo7ZeWLO6H_Us/edit?usp=sharing)
   * [Watch it here](https://drive.google.com/file/d/1IYDV-GTGEYh5j_tvBaWYEZ_tQXTqmJkr/view?usp=share_link)
diff --git a/docs/howtos/layers/points.md b/docs/howtos/layers/points.md
index e3222ad32..d174b3e27 100644
--- a/docs/howtos/layers/points.md
+++ b/docs/howtos/layers/points.md
@@ -153,8 +153,10 @@ additional points, or by dragging a bounding box around the points you want to
 select. You can quickly select the select points tool by pressing the `S` key
 when the points layer is selected.
 
-You can select all the points in the currently viewed slice by clicking the `A`
-key if you are in select mode.
+Additionally, you can select all the points in the currently viewed slice by pressing
+the `A` key and all the points in the layer (across all slices) using `Shift-A`.
+Note: Pressing either keybinding again will toggle the selection, so you can select
+all points in a layer and then *deselect* the points in a slice.
 
 Once selected you can delete the selected points by clicking on the delete
 button in the layer controls panel or pressing the delete key.
diff --git a/docs/naps/8-telemetry.md b/docs/naps/8-telemetry.md
new file mode 100644
index 000000000..c36839f5e
--- /dev/null
+++ b/docs/naps/8-telemetry.md
@@ -0,0 +1,249 @@
+(nap-8)=
+
+# NAP-8 — Telemetry
+
+```{eval-rst}
+:Author: Grzegorz Bokota
+:Created: 2023-08-11
+:Resolution: (required for Accepted | Rejected | Withdrawn)
+:Resolved:
+:Status: Draft
+:Type: Standards Track
+:Version effective: (for accepted NAPs)
+```
+
+## Abstract
+
+This NAP describes how telemetry would be used by the napari project,
+and the architecture and solutions proposed to maximize the privacy of our users.
+
+## Motivation and Scope
+
+With the growth of napari,
+the standard feedback loop through napari community meetings and napari-related events at conferences has reached its capacity.
+We also collect many feature requests for which we cannot find volunteers to implement them.
+
+To make development of the project sustainable,
+we will need to rely either on paid contractors or on companies donating employee time managed by the core devs.
+
+Both scenarios require us to provide an estimate of the number of users to convince potential
+funders that their donation/grant will be put to good use.
+
+Adding the option to monitor plugin usage allows us to identify heavily used plugins and
+to establish cooperation with their maintainers,
+reducing the risk that a plugin will not be ready for a new napari release.
+Such monitoring could cover not only the list of installed plugins
+but also which commands and contributions are used most often.
+
+Collecting information about data types and sizes would also provide valuable insight into typical napari use cases.
+
+Still, users need to be able to opt out of such monitoring
+and to adjust the level of detail of the information that is sent to the napari server.
+Each time we update the set of collected data,
+we should inform users about the changes and give them the opportunity to opt out of telemetry.
+
+Users could also agree to send telemetry only temporarily;
+after a given period of time, the consent dialog will be shown again.
+
+## Detailed Description
+
+`napari-telemetry` will be a package responsible for collecting and sending telemetry data to the napari server.
+It will be installed after user confirmation.
+It will contain callbacks for data collection and utilities for storage and sending.
+This package will also contain utilities for validating that the user has agreed to telemetry.
+
+In the main napari package, we need to add code that asks users whether they want to enable telemetry.
+This code should be executed only once per environment.
+
+It should be possible to disable telemetry in the following ways:
+
+1. Disable it in the settings.
+2. Uninstall the `napari-telemetry` package.
+3. Set the environment variable `NAPARI_TELEMETRY=0`.
+4. Disable it system-wide, e.g., via firewall filtering for HPC or other managed environments.
+
+The user should be able to adjust the telemetry level of detail. The following levels are proposed:
+
+1. `none` - no telemetry is collected.
+2. `basic` - collects information about the napari version, Python version, OS, and CPU architecture,
+   and whether it is the user's first report.
+   There is also a user identifier derived from computer details
+   that is regenerated each week to prevent tracking individual users
+   while still allowing us to gauge user numbers accurately.
+3. `middle` - same as `basic`, plus the list of installed plugins and their versions.
+   To avoid collecting data about plugins that are not intended to be public,
+   we will only collect information about plugins discoverable through the plugin dialog or the napari hub,
+   and we will not collect information about plugins installed in a non-stable version.
+4. `full` - same as `middle`, plus information about plugin usage,
+   collected by binding to app-model and logging which plugin commands are used.
+   Basic information about data, such as types (`np.ndarray`, `dask.array`, `zarr.Array`, etc.) and size,
+   will also be collected.
+
+There should be a visible indicator that telemetry is enabled (for example, on the status bar).
+
+The second part of this work is to set up the server that collects the telemetry data.
+It should also provide a basic public dashboard that allows the community to see aggregated information.
+
+We propose the following data retention policy:
+
+1) Logs are kept for up to 2 weeks.
+2) Raw data is kept for up to 2 months (1 month of collection, then aggregation and time to validate the aggregated data).
+3) Aggregated data is kept indefinitely.
+
+## Privacy assessment
+
+In preparing this NAP, we assume that none of the collected data will be presented in
+a form that allows identifying a single user or their research area.
+We also choose the set of collected data to minimize the possibility of revealing sensitive information.
+However, it is impossible to guarantee that identifying a single user will never be possible
+(for example, by checking combinations of installed plugins).
+
+Because of this, we propose not to publish raw data and to show only aggregated results.
+The aggregation will be performed using scripts.
+napari core devs will access the raw data only if there are errors in the aggregation process.
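+
+As an illustration only, the weekly-regenerated identifier described under the `basic` level
+could be derived roughly as follows. The fingerprint inputs and names below are hypothetical
+and not part of this proposal; a real implementation would choose them more carefully.
+
+```python
+# Sketch of a weekly-rotating anonymous identifier (illustrative, not a specification).
+import hashlib
+import platform
+import uuid
+from datetime import date
+
+
+def weekly_anonymous_id() -> str:
+    year, week, _ = date.today().isocalendar()
+    fingerprint = "|".join([
+        platform.system(),      # e.g. "Linux"
+        platform.machine(),     # e.g. "x86_64"
+        str(uuid.getnode()),    # hardware-derived value, only ever sent hashed
+        f"{year}-W{week:02d}",  # rotates the identifier every ISO week
+    ])
+    return hashlib.sha256(fingerprint.encode()).hexdigest()[:16]
+```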
+
+We will also publish a list of endpoints for each telemetry level,
+so that a given level of telemetry can be blocked at the organization level
+(for example, by a firewall rule).
+
+If someone finds that we are publishing problematic data, we will remove it
+and update the aggregation process to prevent such a situation in the future.
+This NAP will be updated to reflect the current state of telemetry.
+
+## Related Work
+
+End-to-end systems:
+https://plausible.io/
+https://sentry.io/
+https://opentelemetry.io/
+
+Visualization:
+https://github.com/grafana/grafana
+
+## Implementation
+
+The key consideration for the implementation should be a low maintenance cost,
+so the solution should be as simple as possible.
+We could either use existing solutions on the server side or implement our own.
+
+The benefit of existing solutions is that most of the work is already done.
+The downside is that they may add maintenance cost:
+they provide many features that napari does not need, which could also increase the risk of leaking data.
+A quick check of their code revealed that they are built with technologies that are not familiar to the napari core devs.
+So, if we decide to use one of them, we should select a SaaS offering that is maintained by its vendor.
+
+For now, I suggest creating a simple REST API server for collecting the data.
+It could be a simple Python FastAPI server that stores data in an SQLite database.
+The connection to the server will be encrypted using HTTPS, with a certificate provided by Let's Encrypt.
+
+Data for aggregation should be extracted from the database using a script running on the same machine.
+
+The output of the aggregation script should be loaded into an existing visualization tool, such as Grafana.
+
+It may be worthwhile to host the raw and aggregated data on separate servers;
+then, even if the server presenting the dashboard is compromised,
+the raw data is not exposed to the world.
+
+Having both the server and the aggregation scripts in Python will reduce the maintenance cost for the napari core devs.
+
+We should register the `telemetry.napari.org` domain and use it for the server.
+The main page will contain this NAP and a link to the summary dashboard.
+
+The main part of the client side should be implemented in the `napari-telemetry` package.
+The package should not report in a streaming fashion, but collect data on disk and send it in batches.
+This will reduce the risk of leaking data.
+The package should provide a utility that allows users to preview the collected data before it is sent to the server.
+
+In napari itself, the following changes should be implemented:
+
+1) an indicator that shows the telemetry status,
+2) a dialog that asks the user whether they want to enable telemetry,
+3) code to check whether telemetry is enabled (so that the `napari-telemetry` package is not loaded if it is disabled),
+4) code required to initialize the `napari-telemetry` package.
+
+## GDPR compliance
+
+I am fairly sure that we will not collect data covered by the GDPR.
+Nevertheless, to build trust,
+we need to add instructions for how a user can retrieve their unique identifier and set up a process
+for handling requests to remove data from the server.
+Such requests are unlikely, as the life span of the data is short,
+but we need to be prepared for them. I suggest using e-mail for this.
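+
+To make the server side and the removal process more concrete, a minimal sketch of the kind of
+FastAPI + SQLite service described in the Implementation section is shown below. The endpoint
+names, payload schema, and storage layout are placeholders, not decisions of this NAP.
+
+```python
+# Illustrative sketch of the collection service (not a specification).
+import json
+import sqlite3
+
+from fastapi import FastAPI
+from pydantic import BaseModel
+
+app = FastAPI()
+DB_PATH = "telemetry.sqlite"
+
+
+class Report(BaseModel):
+    anonymous_id: str  # the weekly-rotating identifier
+    level: str         # "basic", "middle" or "full"
+    payload: dict      # batched records collected on disk by the client
+
+
+def _connect() -> sqlite3.Connection:
+    conn = sqlite3.connect(DB_PATH)
+    conn.execute(
+        "CREATE TABLE IF NOT EXISTS reports (anonymous_id TEXT, level TEXT, payload TEXT)"
+    )
+    return conn
+
+
+@app.post("/v1/report")
+def ingest(report: Report) -> dict:
+    # Raw rows are kept only until the aggregation script has run (see the retention policy).
+    with _connect() as conn:
+        conn.execute(
+            "INSERT INTO reports VALUES (?, ?, ?)",
+            (report.anonymous_id, report.level, json.dumps(report.payload)),
+        )
+    return {"status": "ok"}
+
+
+@app.delete("/v1/report/{anonymous_id}")
+def forget(anonymous_id: str) -> dict:
+    # Supports the data-removal requests described in the GDPR compliance section.
+    with _connect() as conn:
+        conn.execute("DELETE FROM reports WHERE anonymous_id = ?", (anonymous_id,))
+    return {"status": "deleted"}
+```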
+
+## Backward Compatibility
+
+Not relevant.
+
+## Future Work
+
+A nice extension may be the ability for the steering council to create a certificate, based on the telemetry output,
+that could be given to plugin maintainers to prove to their supervisors that their plugin is used by the community.
+
+## Alternatives
+
+During the discussion, there was a proposal to use the same approach as ImageJ.
+
+This would mean that, instead of implementing telemetry on the client side, we could implement it on the update-server side.
+The advantage, and at the same time the disadvantage, of such a solution is that no user could opt out of telemetry.
+Such a method could potentially provide information about the Python version,
+the napari version, and the list of installed plugins.
+Everything else would still require the mechanism described in this NAP.
+
+It would also require updates on the napari side,
+as we currently only contact the update server when a user opens the plugin manager.
+In addition,
+to have accurate information about installed plugins, we would need
+to send the list of installed plugins
+instead of just downloading the information about all plugins from the server.
+
+As this solution provides less information,
+does not allow opting out, and could get the update server's IP address blocklisted,
+I do not recommend it.
+
+However, based on conversations during the discussion, we may consider more frequent update checks
+to inform users that they could update their napari or plugin versions.
+For such a change, we would need to update our update server to provide information per Python version
+(as some plugins could drop old Python versions earlier than napari).
+
+The second alternative is to use a third-party solution like [plausible.io](https://plausible.io/).
+From my perspective, however,
+it is harder to adjust the set of collected data, as these services are designed to monitor webpages.
+
+## Discussion
+
+This section may just be a bullet list including links to any discussions
+regarding the NAP, but could also contain additional comments about that
+discussion:
+
+- This includes links to discussion forum threads or relevant GitHub discussions.
+
+## References and Footnotes
+
+All NAPs should be declared as dedicated to the public domain with the CC0
+license [^id3], as in `Copyright`, below, with attribution encouraged with
+CC0+BY [^id4].
+
+[^id3]: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication,
+    <https://creativecommons.org/publicdomain/zero/1.0/>
+
+[^id4]: <https://dancohen.org/2013/11/26/cc0-by/>
+
+## Copyright
+
+This document is dedicated to the public domain with the Creative Commons CC0
+license [^id3]. Attribution to this source is encouraged where appropriate, as per
+CC0+BY [^id4].
diff --git a/docs/requirements.txt b/docs/requirements.txt
deleted file mode 100644
index 999be5594..000000000
--- a/docs/requirements.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-sphinx<5
-sphinx-autobuild
-sphinx-tabs
-sphinx-tags
-sphinx-design
-sphinx-external-toc
-sphinx-favicon
-sphinx-gallery
-sphinx_autodoc_typehints==1.12.0
-myst-nb
-napari-sphinx-theme
-matplotlib
-lxml
-imageio-ffmpeg
diff --git a/docs/tutorials/fundamentals/installation.md b/docs/tutorials/fundamentals/installation.md
index ada383d61..e9b493f00 100644
--- a/docs/tutorials/fundamentals/installation.md
+++ b/docs/tutorials/fundamentals/installation.md
@@ -170,6 +170,13 @@ the current release {{ napari_version }}, using command: `napari --version` .
 ````
 ![macOS desktop with a napari viewer window without any image opened in the foreground, and a terminal in the background with the appropriate conda environment activated (if applicable) and the command to open napari entered.](../assets/tutorials/launch_cli_empty.png)
 
+````{note}
+On some platforms, particularly macOS and Windows, there may be a ~30 second
+delay before the viewer appears on first launch. This is expected and subsequent
+launches should be quick. However, anti-malware and other security software
+measures may further delay launches—even after the first launch.
+````
+
 ## Choosing a different Qt backend
 
 napari needs a library called [Qt](https://www.qt.io/) to run its user interface
@@ -263,10 +270,8 @@ started](./getting_started) tutorial
 
 - if you are interested in contributing to napari please check our
   [contributing guidelines](../../developers/contributing.md)
-- if you are running into issues or bugs, please open a new issue on our [issue
+- if you are running into issues or bugs, please open a [new issue](https://github.com/napari/napari/issues/new/choose) on our [issue
   tracker](https://github.com/napari/napari/issues)
-  - include the output of `napari --info`
-  (or go to `Help>Info` in the viewer and copy paste the information)
 - if you want help using napari, we are a community partner on the [imagesc
   forum](https://forum.image.sc/tags/napari) and all usage support requests
   should be posted on the forum with the tag `napari`. We look forward to interacting
diff --git a/docs/tutorials/fundamentals/viewer.md b/docs/tutorials/fundamentals/viewer.md
index 52201aa66..e917d6514 100644
--- a/docs/tutorials/fundamentals/viewer.md
+++ b/docs/tutorials/fundamentals/viewer.md
@@ -456,11 +456,15 @@ The right side of the status bar contains some helpful tips depending on which l
 
 ## Right-click menu
 
 A context-sensitive menu is available when you right-click on any of the layers. The type of layer determines which options are available. Note that if you have multiple layers selected, the menu actions will affect all of the selected layers. The options that are not available for a layer are greyed out. The following options are available depending on which layer type you have selected:
+* **Toggle visibility** - inverts the visibility state (hide or show) of the selected layers: hidden layers will be shown, visible layers will be hidden.
+* **Show All Selected Layers** - sets all selected layers to visible.
+* **Hide All Selected Layers** - sets all selected layers to hidden.
+* **Show All Unselected Layers** - sets all *unselected* layers to visible.
+* **Hide All Unselected Layers** - sets all *unselected* layers to hidden.
 * **Duplicate Layer** - creates a second copy of the selected layer. Can be used on **Points**, **Shapes**, **Labels**, and **Image** layers. This is useful for testing your analysis on a copy instead of on the original image.
 * **Convert to Labels** - converts an **Image** layer to a **Labels** layer. This is useful for converting a binary image segmentation map to a labels layer with each segmented object denoted by its own integer. Can also be used on a **Shapes** layer.
 * **Convert to Image** - converts a **Labels** layer into an **Image** layer.
-* **Toggle visibility** - hides or shows the selected layer.
-* **Convert datatype** - converts an **Image** or **Labels** layer into int8, int16, int32, int64, uint8, uint16, uint32, or uint64 data types. The initial data type is the data type of the data itself.
+* **Convert datatype** - converts a **Labels** layer into int8, int16, int32, int64, uint8, uint16, uint32, or uint64 data types. The initial data type is the data type of the data itself.
 * **Make Projection** - can be used only on a layer with more than 2 dimensions, also known as a *stack*. It creates a new layer that is a projection of the layer stack with the characteristic the user selects, reducing the number of dimensions by 1. More information about the types of projections is available [here](https://medium.com/@damiandn/an-intoduction-to-biological-image-processing-in-imagej-part-3-stacks-and-stack-projections-942aa789420f). The following projections are available:
   * **Max** - maximum intensity projection. At each pixel position, we go through the stacks, find the pixel with the maximum intensity, and that becomes the intensity of that pixel value in the projected image.
   * **Min** - minimum intensity projection. Similar to the maximum intensity projection, except that the minimum pixel value is used for the projected image instead of the maximum pixel value.