From f79b68a4341c0a86a6829dec030498ac5db69b06 Mon Sep 17 00:00:00 2001
From: wincowgerDEV
Date: Thu, 1 Sep 2022 09:07:58 -0700
Subject: [PATCH 01/73] my workflow
---
.gitignore | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/.gitignore b/.gitignore
index 82c26f0..7a178f1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -11,9 +11,16 @@ data
# other
.DS_Store
+#Google drive
+*.ini
+
+# R Studio
+*.Rproj
+
**/*.pyc
.python-version
**/*.zip
pyrightconfig.json
archive/
tmp/
+.Rproj.user
From dcdf30a51fd1d0f1901ed7fed0c8d789d2e4be91 Mon Sep 17 00:00:00 2001
From: wincowgerDEV
Date: Thu, 1 Sep 2022 09:11:29 -0700
Subject: [PATCH 02/73] example templates for manuscript.
---
paper.bib | 59 ++++++++++++++++++++++++++++
paper.md | 115 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 174 insertions(+)
create mode 100644 paper.bib
create mode 100644 paper.md
diff --git a/paper.bib b/paper.bib
new file mode 100644
index 0000000..72e3977
--- /dev/null
+++ b/paper.bib
@@ -0,0 +1,59 @@
+@article{Pearson:2017,
+ url = {http://adsabs.harvard.edu/abs/2017arXiv170304627P},
+ Archiveprefix = {arXiv},
+ Author = {{Pearson}, S. and {Price-Whelan}, A.~M. and {Johnston}, K.~V.},
+ Eprint = {1703.04627},
+ Journal = {ArXiv e-prints},
+ Keywords = {Astrophysics - Astrophysics of Galaxies},
+ Month = mar,
+ Title = {{Gaps in Globular Cluster Streams: Pal 5 and the Galactic Bar}},
+ Year = 2017
+}
+
+@book{Binney:2008,
+ url = {http://adsabs.harvard.edu/abs/2008gady.book.....B},
+ Author = {{Binney}, J. and {Tremaine}, S.},
+ Booktitle = {Galactic Dynamics: Second Edition, by James Binney and Scott Tremaine.~ISBN 978-0-691-13026-2 (HB).~Published by Princeton University Press, Princeton, NJ USA, 2008.},
+ Publisher = {Princeton University Press},
+ Title = {{Galactic Dynamics: Second Edition}},
+ Year = 2008
+}
+
+@article{gaia,
+ author = {{Gaia Collaboration}},
+ title = "{The Gaia mission}",
+ journal = {Astronomy and Astrophysics},
+ archivePrefix = "arXiv",
+ eprint = {1609.04153},
+ primaryClass = "astro-ph.IM",
+ keywords = {space vehicles: instruments, Galaxy: structure, astrometry, parallaxes, proper motions, telescopes},
+ year = 2016,
+ month = nov,
+ volume = 595,
+ doi = {10.1051/0004-6361/201629272},
+ url = {http://adsabs.harvard.edu/abs/2016A%26A...595A...1G},
+}
+
+@article{astropy,
+ author = {{Astropy Collaboration}},
+ title = "{Astropy: A community Python package for astronomy}",
+ journal = {Astronomy and Astrophysics},
+ archivePrefix = "arXiv",
+ eprint = {1307.6212},
+ primaryClass = "astro-ph.IM",
+ keywords = {methods: data analysis, methods: miscellaneous, virtual observatory tools},
+ year = 2013,
+ month = oct,
+ volume = 558,
+ doi = {10.1051/0004-6361/201322068},
+ url = {http://adsabs.harvard.edu/abs/2013A%26A...558A..33A}
+}
+
+@misc{fidgit,
+ author = {A. M. Smith and K. Thaney and M. Hahnel},
+ title = {Fidgit: An ungodly union of GitHub and Figshare},
+ year = {2020},
+ publisher = {GitHub},
+ journal = {GitHub repository},
+ url = {https://github.com/arfon/fidgit}
+}
\ No newline at end of file
diff --git a/paper.md b/paper.md
new file mode 100644
index 0000000..beaff0c
--- /dev/null
+++ b/paper.md
@@ -0,0 +1,115 @@
+---
+title: 'Gala: A Python package for galactic dynamics'
+tags:
+ - Python
+ - astronomy
+ - dynamics
+ - galactic dynamics
+ - milky way
+authors:
+ - name: Adrian M. Price-Whelan
+ orcid: 0000-0000-0000-0000
+ equal-contrib: true
+ affiliation: "1, 2" # (Multiple affiliations must be quoted)
+ - name: Author Without ORCID
+ equal-contrib: true # (This is how you can denote equal contributions between multiple authors)
+ affiliation: 2
+ - name: Author with no affiliation
+ corresponding: true # (This is how to denote the corresponding author)
+ affiliation: 3
+affiliations:
+ - name: Lyman Spitzer, Jr. Fellow, Princeton University, USA
+ index: 1
+ - name: Institution Name, Country
+ index: 2
+ - name: Independent Researcher, Country
+ index: 3
+date: 13 August 2017
+bibliography: paper.bib
+
+# Optional fields if submitting to a AAS journal too, see this blog post:
+# https://blog.joss.theoj.org/2018/12/a-new-collaboration-with-aas-publishing
+aas-doi: 10.3847/xxxxx <- update this with the DOI from AAS once you know it.
+aas-journal: Astrophysical Journal <- The name of the AAS journal.
+---
+
+# Summary
+
+The forces on stars, galaxies, and dark matter under external gravitational
+fields lead to the dynamical evolution of structures in the universe. The orbits
+of these bodies are therefore key to understanding the formation, history, and
+future state of galaxies. The field of "galactic dynamics," which aims to model
+the gravitating components of galaxies to study their structure and evolution,
+is now well-established, commonly taught, and frequently used in astronomy.
+Aside from toy problems and demonstrations, the majority of problems require
+efficient numerical tools, many of which require the same base code (e.g., for
+performing numerical orbit integration).
+
+# Statement of need
+
+`Gala` is an Astropy-affiliated Python package for galactic dynamics. Python
+enables wrapping low-level languages (e.g., C) for speed without losing
+flexibility or ease-of-use in the user-interface. The API for `Gala` was
+designed to provide a class-based and user-friendly interface to fast (C or
+Cython-optimized) implementations of common operations such as gravitational
+potential and force evaluation, orbit integration, dynamical transformations,
+and chaos indicators for nonlinear dynamics. `Gala` also relies heavily on and
+interfaces well with the implementations of physical units and astronomical
+coordinate systems in the `Astropy` package [@astropy] (`astropy.units` and
+`astropy.coordinates`).
+
+`Gala` was designed to be used by both astronomical researchers and by
+students in courses on gravitational dynamics or astronomy. It has already been
+used in a number of scientific publications [@Pearson:2017] and has also been
+used in graduate courses on Galactic dynamics to, e.g., provide interactive
+visualizations of textbook material [@Binney:2008]. The combination of speed,
+design, and support for Astropy functionality in `Gala` will enable exciting
+scientific explorations of forthcoming data releases from the *Gaia* mission
+[@gaia] by students and experts alike.
+
+# Mathematics
+
+Single dollars ($) are required for inline mathematics e.g. $f(x) = e^{\pi/x}$
+
+Double dollars make self-standing equations:
+
+$$\Theta(x) = \left\{\begin{array}{l}
+0\textrm{ if } x < 0\cr
+1\textrm{ else}
+\end{array}\right.$$
+
+You can also use plain \LaTeX for equations
+\begin{equation}\label{eq:fourier}
+\hat f(\omega) = \int_{-\infty}^{\infty} f(x) e^{i\omega x} dx
+\end{equation}
+and refer to \autoref{eq:fourier} from text.
+
+# Citations
+
+Citations to entries in paper.bib should be in
+[rMarkdown](http://rmarkdown.rstudio.com/authoring_bibliographies_and_citations.html)
+format.
+
+If you want to cite a software repository URL (e.g. something on GitHub without a preferred
+citation) then you can do it with the example BibTeX entry below for @fidgit.
+
+For a quick reference, the following citation commands can be used:
+- `@author:2001` -> "Author et al. (2001)"
+- `[@author:2001]` -> "(Author et al., 2001)"
+- `[@author1:2001; @author2:2001]` -> "(Author1 et al., 2001; Author2 et al., 2002)"
+
+# Figures
+
+Figures can be included like this:
+![Caption for example figure.\label{fig:example}](figure.png)
+and referenced from text using \autoref{fig:example}.
+
+Figure sizes can be customized by adding an optional second parameter:
+![Caption for example figure.](figure.png){ width=20% }
+
+# Acknowledgements
+
+We acknowledge contributions from Brigitta Sipocz, Syrtis Major, and Semyeong
+Oh, and support from Kathryn Johnston during the genesis of this project.
+
+# References
\ No newline at end of file
From 756f28c114232f2847cf8b0322af60a3bad55cfb Mon Sep 17 00:00:00 2001
From: wincowgerDEV
Date: Thu, 1 Sep 2022 09:29:22 -0700
Subject: [PATCH 03/73] manuscript info
---
paper.md | 41 +++++++++++++++++++++++------------------
1 file changed, 23 insertions(+), 18 deletions(-)
diff --git a/paper.md b/paper.md
index beaff0c..e01fb7e 100644
--- a/paper.md
+++ b/paper.md
@@ -1,30 +1,35 @@
---
-title: 'Gala: A Python package for galactic dynamics'
+title: 'Trash AI: Web GUI for Browser-Side Computer Vision Analysis of Trash Images'
tags:
- - Python
- - astronomy
- - dynamics
- - galactic dynamics
- - milky way
+ - tensorflow.js
+ - IndexDB
+ - Plastic Pollution
+ - Trash
+ - Litter
+ - AI
+ - Image Classification
authors:
- - name: Adrian M. Price-Whelan
- orcid: 0000-0000-0000-0000
- equal-contrib: true
- affiliation: "1, 2" # (Multiple affiliations must be quoted)
- - name: Author Without ORCID
- equal-contrib: true # (This is how you can denote equal contributions between multiple authors)
- affiliation: 2
- - name: Author with no affiliation
+ - name: Win Cowger
+ orcid: 0000-0001-9226-3104
+ affiliation: 1 # (Multiple affiliations must be quoted)
+ - name: Steven Hollingsworth
corresponding: true # (This is how to denote the corresponding author)
+ # equal-contrib: true # (This is how you can denote equal contributions between multiple authors)
+ affiliation: 2
+ - name: Day Fey
+ affiliation: 2
+ - name: Mary C Norris
+ affiliation: 2
+ - name: Walter Yu
affiliation: 3
affiliations:
- - name: Lyman Spitzer, Jr. Fellow, Princeton University, USA
+ - name: Moore Institute for Plastic Pollution Research, USA
index: 1
- - name: Institution Name, Country
+ - name: Code for Sacramento, USA
index: 2
- - name: Independent Researcher, Country
+ - name: California Department of Transportation, USA
index: 3
-date: 13 August 2017
+date: 1 September 2022
bibliography: paper.bib
# Optional fields if submitting to a AAS journal too, see this blog post:
From d6b503bc742fa8f7e4f0a370b2badf26ce856774 Mon Sep 17 00:00:00 2001
From: wincowgerDEV
Date: Mon, 5 Sep 2022 13:57:22 -0700
Subject: [PATCH 04/73] update citations
---
paper.bib | 215 ++++++++++++++++++++++++++++++++++++++++--------------
paper.md | 96 +++++++-----------------
2 files changed, 186 insertions(+), 125 deletions(-)
diff --git a/paper.bib b/paper.bib
index 72e3977..96ad061 100644
--- a/paper.bib
+++ b/paper.bib
@@ -1,59 +1,162 @@
-@article{Pearson:2017,
- url = {http://adsabs.harvard.edu/abs/2017arXiv170304627P},
- Archiveprefix = {arXiv},
- Author = {{Pearson}, S. and {Price-Whelan}, A.~M. and {Johnston}, K.~V.},
- Eprint = {1703.04627},
- Journal = {ArXiv e-prints},
- Keywords = {Astrophysics - Astrophysics of Galaxies},
- Month = mar,
- Title = {{Gaps in Globular Cluster Streams: Pal 5 and the Galactic Bar}},
- Year = 2017
-}
-
-@book{Binney:2008,
- url = {http://adsabs.harvard.edu/abs/2008gady.book.....B},
- Author = {{Binney}, J. and {Tremaine}, S.},
- Booktitle = {Galactic Dynamics: Second Edition, by James Binney and Scott Tremaine.~ISBN 978-0-691-13026-2 (HB).~Published by Princeton University Press, Princeton, NJ USA, 2008.},
- Publisher = {Princeton University Press},
- Title = {{Galactic Dynamics: Second Edition}},
- Year = 2008
-}
-
-@article{gaia,
- author = {{Gaia Collaboration}},
- title = "{The Gaia mission}",
- journal = {Astronomy and Astrophysics},
- archivePrefix = "arXiv",
- eprint = {1609.04153},
- primaryClass = "astro-ph.IM",
- keywords = {space vehicles: instruments, Galaxy: structure, astrometry, parallaxes, proper motions, telescopes},
- year = 2016,
- month = nov,
- volume = 595,
- doi = {10.1051/0004-6361/201629272},
- url = {http://adsabs.harvard.edu/abs/2016A%26A...595A...1G},
-}
-
-@article{astropy,
- author = {{Astropy Collaboration}},
- title = "{Astropy: A community Python package for astronomy}",
- journal = {Astronomy and Astrophysics},
- archivePrefix = "arXiv",
- eprint = {1307.6212},
- primaryClass = "astro-ph.IM",
- keywords = {methods: data analysis, methods: miscellaneous, virtual observatory tools},
- year = 2013,
- month = oct,
- volume = 558,
- doi = {10.1051/0004-6361/201322068},
- url = {http://adsabs.harvard.edu/abs/2013A%26A...558A..33A}
-}
-
-@misc{fidgit,
- author = {A. M. Smith and K. Thaney and M. Hahnel},
- title = {Fidgit: An ungodly union of GitHub and Figshare},
+
+@software{glenn_jocher_2020_4154370,
+ author = {Glenn Jocher and
+ Alex Stoken and
+ Jirka Borovec and
+ NanoCode012 and
+ ChristopherSTAN and
+ Liu Changyu and
+ Laughing and
+ tkianai and
+ Adam Hogan and
+ lorenzomammana and
+ yxNONG and
+ AlexWang1900 and
+ Laurentiu Diaconu and
+ Marc and
+ wanghaoyang0106 and
+ ml5ah and
+ Doug and
+ Francisco Ingham and
+ Frederik and
+ Guilhen and
+ Hatovix and
+ Jake Poznanski and
+ Jiacong Fang and
+ Lijun Yu 于力军 and
+ changyu98 and
+ Mingyu Wang and
+ Naman Gupta and
+ Osama Akhtar and
+ PetrDvoracek and
+ Prashant Rai},
+ title = {{ultralytics/yolov5: v3.1 - Bug Fixes and
+ Performance Improvements}},
+ month = oct,
+ year = 2020,
+ publisher = {Zenodo},
+ version = {v3.1},
+ doi = {10.5281/zenodo.4154370},
+ url = {https://doi.org/10.5281/zenodo.4154370}
+}
+
+@article{van Lieshout:2020,
+author = {van Lieshout, Colin and van Oeveren, Kees and van Emmerik, Tim and Postma, Eric},
+title = {Automated River Plastic Monitoring Using Deep Learning and Cameras},
+journal = {Earth and Space Science},
+volume = {7},
+number = {8},
+pages = {e2019EA000960},
+keywords = {plastic pollution, object detection, automated monitoring, deep learning, artificial intelligence, river plastic},
+doi = {https://doi.org/10.1029/2019EA000960},
+url = {https://agupubs.onlinelibrary.wiley.com/doi/abs/10.1029/2019EA000960},
+eprint = {https://agupubs.onlinelibrary.wiley.com/doi/pdf/10.1029/2019EA000960},
+note = {e2019EA000960 10.1029/2019EA000960},
+abstract = {Abstract Quantifying plastic pollution on surface water is essential to understand and mitigate the impact of plastic pollution to the environment. Current monitoring methods such as visual counting are labor intensive. This limits the feasibility of scaling to long-term monitoring at multiple locations. We present an automated method for monitoring plastic pollution that overcomes this limitation. Floating macroplastics are detected from images of the water surface using deep learning. We perform an experimental evaluation of our method using images from bridge-mounted cameras at five different river locations across Jakarta, Indonesia. The four main results of the experimental evaluation are as follows. First, we realize a method that obtains a reliable estimate of plastic density (68.7\% precision). Our monitoring method successfully distinguishes plastics from environmental elements, such as water surface reflection and organic waste. Second, when trained on one location, the method generalizes well to new locations with relatively similar conditions without retraining (≈50\% average precision). Third, generalization to new locations with considerably different conditions can be boosted by retraining on only 50 objects of the new location (improving precision from ≈20\% to ≈42\%). Fourth, our method matches visual counting methods and detects ≈35\% more plastics, even more so during periods of plastic transport rates of above 10 items per meter per minute. Taken together, these results demonstrate that our method is a promising way of monitoring plastic pollution. By extending the variety of the data set the monitoring method can be readily applied at a larger scale.},
+year = {2020}
+}
+
+@misc{WADE AI:2020,
+  author = {K. Kerge and W. Cowger and K. Haamer and K. Ehala and K. Kivistik and T. Tammiste and M. Vares},
+ title = {WADE AI Trash Detection},
year = {2020},
publisher = {GitHub},
journal = {GitHub repository},
- url = {https://github.com/arfon/fidgit}
-}
\ No newline at end of file
+ url = {https://github.com/letsdoitworld/wade-ai}
+}
+
+@misc{Wuu:2018,
+ author = {S. Wuu},
+ title = {Litter Detection Tensorflow},
+ year = {2018},
+ publisher = {GitHub},
+ journal = {GitHub repository},
+ url = {https://github.com/isaychris/litter-detection-tensorflow}
+}
+
+@misc{Waterboards:2018,
+ author = {California State Water Resources Control Board},
+ title = {Trash Tracker},
+ year = {2018},
+ publisher = {GitHub},
+ journal = {GitHub repository},
+ url = {https://github.com/CAWaterBoardDataCenter/Trash-Tracker}
+}
+
+
+
+@ARTICLE{Lynch:2018,
+ title = "{OpenLitterMap.com} -- Open Data on Plastic Pollution with
+ Blockchain Rewards (Littercoin)",
+ author = "Lynch, Se{\'a}n",
+ abstract = "OpenLitterMap rewards users with Littercoin for producing open
+ data on litter. Open data on the geospatial characteristics of
+ litter provide means of invoking and evaluating responses to
+ plastic pollution. OpenLitterMap currently works as a web app on
+ all devices with native mobile apps in development. The stack
+ includes the integration of the Laravel PHP Framework on the
+ backend; Vue for frontend reactivity; NativeScript-Vue for mobile
+ apps; Bulma for CSS; Leaflet for web-mapping; Turf.js for
+ geospatial analysis; the Ethereum Blockchain for tokenization;
+ Stripe; ChartJS; AWS; and more. Anywhere from a single cigarette
+ butt to the contents of an entire beach or street clean can be
+ logged in a single geotagged photo. Alternatively, a simple index
+ may be used if litter is incalculable. The open data includes an
+ increasing 100+ pre-defined types of litter; 20+ corporate
+ brands; verification status; coordinates; timestamp; phone model;
+ the latest OpenStreetMap address at each location; and the litter
+ presence as a Boolean. To date, 100\% of all submitted data (~
+ 8200 photos, ~ 28,000 litter from over 150 contributors) has been
+ manually verified which is being used to develop machine learning
+ algorithms.",
+ journal = "Open Geospatial Data, Software and Standards",
+ volume = 3,
+ number = 1,
+ pages = "6",
+ month = jun,
+ year = 2018
+}
+
+@article{Majchrowska:2022,
+title = {Deep learning-based waste detection in natural and urban environments},
+journal = {Waste Management},
+volume = {138},
+pages = {274-284},
+year = {2022},
+issn = {0956-053X},
+doi = {https://doi.org/10.1016/j.wasman.2021.12.001},
+url = {https://www.sciencedirect.com/science/article/pii/S0956053X21006474},
+author = {Sylwia Majchrowska and Agnieszka Mikołajczyk and Maria Ferlin and Zuzanna Klawikowska and Marta A. Plantykow and Arkadiusz Kwasigroch and Karol Majek},
+keywords = {Object detection, Semi-supervised learning, Waste classification benchmarks, Waste detection benchmarks, Waste localization, Waste recognition},
+abstract = {Waste pollution is one of the most significant environmental issues in the modern world. The importance of recycling is well known, both for economic and ecological reasons, and the industry demands high efficiency. Current studies towards automatic waste detection are hardly comparable due to the lack of benchmarks and widely accepted standards regarding the used metrics and data. Those problems are addressed in this article by providing a critical analysis of over ten existing waste datasets and a brief but constructive review of the existing Deep Learning-based waste detection approaches. This article collects and summarizes previous studies and provides the results of authors’ experiments on the presented datasets, all intended to create a first replicable baseline for litter detection. Moreover, new benchmark datasets detect-waste and classify-waste are proposed that are merged collections from the above-mentioned open-source datasets with unified annotations covering all possible waste categories: bio, glass, metal and plastic, non-recyclable, other, paper, and unknown. Finally, a two-stage detector for litter localization and classification is presented. EfficientDet-D2 is used to localize litter, and EfficientNet-B2 to classify the detected waste into seven categories. The classifier is trained in a semi-supervised fashion making the use of unlabeled images. The proposed approach achieves up to 70% of average precision in waste detection and around 75% of classification accuracy on the test dataset. The code and annotations used in the studies are publicly available online11https://github.com/wimlds-trojmiasto/detect-waste..}
+}
+
+@misc{Proença:2020,
+ doi = {10.48550/ARXIV.2003.06975},
+
+ url = {https://arxiv.org/abs/2003.06975},
+
+ author = {Proença, Pedro F and Simões, Pedro},
+
+ keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences},
+
+ title = {TACO: Trash Annotations in Context for Litter Detection},
+
+ publisher = {arXiv},
+
+ year = {2020},
+
+ copyright = {arXiv.org perpetual, non-exclusive license}
+}
+
+
+@TECHREPORT{Moore:2020,
+ title = "California Trash Monitoring Methods and Assessments Playbook",
+ author = "Moore, Shelly and Hale, Tony and Weisberg, Stephen B and
+ Flores, Lorenzo and Kauhanen, Pete",
+ institution = "San Francisco Estuary Institute",
+ year = 2020
+}
+
+
+
diff --git a/paper.md b/paper.md
index e01fb7e..5d084d8 100644
--- a/paper.md
+++ b/paper.md
@@ -1,5 +1,5 @@
---
-title: 'Trash AI: Web GUI for Browser-Side Computer Vision Analysis of Trash Images'
+title: 'Trash AI: A Web GUI for Serverless Computer Vision Analysis of Images of Trash'
tags:
- tensorflow.js
- IndexDB
@@ -8,13 +8,13 @@ tags:
- Litter
- AI
- Image Classification
+ - Serverless
authors:
- name: Win Cowger
orcid: 0000-0001-9226-3104
affiliation: 1 # (Multiple affiliations must be quoted)
- name: Steven Hollingsworth
corresponding: true # (This is how to denote the corresponding author)
- # equal-contrib: true # (This is how you can denote equal contributions between multiple authors)
affiliation: 2
- name: Day Fey
affiliation: 2
@@ -22,6 +22,10 @@ authors:
affiliation: 2
- name: Walter Yu
affiliation: 3
+ - name: Kristiina Kerge
+ affiliation: 4
+ - name: Kris Haamer
+ affiliation: 4
affiliations:
- name: Moore Institute for Plastic Pollution Research, USA
index: 1
@@ -29,92 +33,46 @@ affiliations:
index: 2
- name: California Department of Transportation, USA
index: 3
+  - name: Let's Do It Foundation, Estonia
date: 1 September 2022
bibliography: paper.bib
-# Optional fields if submitting to a AAS journal too, see this blog post:
-# https://blog.joss.theoj.org/2018/12/a-new-collaboration-with-aas-publishing
-aas-doi: 10.3847/xxxxx <- update this with the DOI from AAS once you know it.
-aas-journal: Astrophysical Journal <- The name of the AAS journal.
---
# Summary
-The forces on stars, galaxies, and dark matter under external gravitational
-fields lead to the dynamical evolution of structures in the universe. The orbits
-of these bodies are therefore key to understanding the formation, history, and
-future state of galaxies. The field of "galactic dynamics," which aims to model
-the gravitating components of galaxies to study their structure and evolution,
-is now well-established, commonly taught, and frequently used in astronomy.
-Aside from toy problems and demonstrations, the majority of problems require
-efficient numerical tools, many of which require the same base code (e.g., for
-performing numerical orbit integration).
+Although computer vision classification routines have been created for trash, they have not been accessible to most researchers due to the challenges in deploying the models. Trash AI is a web GUI (Graphical User Interface) for serverless computer vision classification of batch images with trash in them hosted at www.trashai.org. With a single batch upload and download, a user can automatically describe the types and quantities of trash in all of their images.
# Statement of need
-`Gala` is an Astropy-affiliated Python package for galactic dynamics. Python
-enables wrapping low-level languages (e.g., C) for speed without losing
-flexibility or ease-of-use in the user-interface. The API for `Gala` was
-designed to provide a class-based and user-friendly interface to fast (C or
-Cython-optimized) implementations of common operations such as gravitational
-potential and force evaluation, orbit integration, dynamical transformations,
-and chaos indicators for nonlinear dynamics. `Gala` also relies heavily on and
-interfaces well with the implementations of physical units and astronomical
-coordinate systems in the `Astropy` package [@astropy] (`astropy.units` and
-`astropy.coordinates`).
+The trash in the environment is a widespread problem that is difficult to measure. Classical measurement techniques require surveyors with pen and paper to manually quantify every piece of trash at a site. This method is time-consuming. Scientists are actively trying to address this issue by using imaging to better understand the prevalence and distribution of trash in an efficient yet effective manner [@Majchrowska:2022; @Proença:2020; @Moore:2020; @van Lieshout:2020; @WADE AI:2020; @Lynch:2018; @Wuu:2018; @Waterboards:2018]. An app-based reporting of trash using cell phones, laptops, and other devices has been a valuable solution [@Lynch:2018]. Applications for AI in detecting trash currently include: images from bridges [@van Lieshout:2020], drone imaging [@Moore:2020], cameras on street sweepers [@Waterboards:2018], and cell phone app based reporting of trash [@Lynch:2018]. Although there are many artificial intelligence algorithms developed for trash classification, none are readily accessible to the average data scientist. The primary limitation is that artificial intelligence (AI) algorithms are primarily run through programming languages (not graphic user interfaces), difficult to deploy without AI expertise, and often live on a server (which costs money to host). New developments in browser-side AI (e.g. tensorflow.js) and serverless architecture (e.g. AWS Lambda) have created the opportunity to have browser-side artificial intelligence in a web GUI alleviating both obstacles. We present Trash AI, an open source service for making computer vision available to anyone with a web browser and images of trash.
-`Gala` was designed to be used by both astronomical researchers and by
-students in courses on gravitational dynamics or astronomy. It has already been
-used in a number of scientific publications [@Pearson:2017] and has also been
-used in graduate courses on Galactic dynamics to, e.g., provide interactive
-visualizations of textbook material [@Binney:2008]. The combination of speed,
-design, and support for Astropy functionality in `Gala` will enable exciting
-scientific explorations of forthcoming data releases from the *Gaia* mission
-[@gaia] by students and experts alike.
-
-# Mathematics
-
-Single dollars ($) are required for inline mathematics e.g. $f(x) = e^{\pi/x}$
-
-Double dollars make self-standing equations:
-
-$$\Theta(x) = \left\{\begin{array}{l}
-0\textrm{ if } x < 0\cr
-1\textrm{ else}
-\end{array}\right.$$
-
-You can also use plain \LaTeX for equations
-\begin{equation}\label{eq:fourier}
-\hat f(\omega) = \int_{-\infty}^{\infty} f(x) e^{i\omega x} dx
-\end{equation}
-and refer to \autoref{eq:fourier} from text.
+# Example
+ Video
+ Figures can be included like this:
+![Caption for example figure.\label{fig:example}](figure.png)
+and referenced from text using \autoref{fig:example}.
-# Citations
+Figure sizes can be customized by adding an optional second parameter:
+![Caption for example figure.](figure.png){ width=20% }
-Citations to entries in paper.bib should be in
-[rMarkdown](http://rmarkdown.rstudio.com/authoring_bibliographies_and_citations.html)
-format.
+# Method
-If you want to cite a software repository URL (e.g. something on GitHub without a preferred
-citation) then you can do it with the example BibTeX entry below for @fidgit.
+## AI Training
+An algorithm trained on the TACO (http://tacodataset.org/) dataset and using YOLO 5 (pytorch.org) which analyzes the images in the browser and provides the prediction of the model as a graphical output. The raw data from the model and labeled images can be downloaded in a batch download to expedite analyses. Any data uploaded to the platform is automatically saved to an S3 bucket which we can use to improve the model over time.
-For a quick reference, the following citation commands can be used:
-- `@author:2001` -> "Author et al. (2001)"
-- `[@author:2001]` -> "(Author et al., 2001)"
-- `[@author1:2001; @author2:2001]` -> "(Author1 et al., 2001; Author2 et al., 2002)"
+The AI model was developed starting with the TACO dataset which was available within a Jupyter Notebook on Kaggle (https://www.kaggle.com/datasets/kneroma/tacotrashdataset). An example notebook was referenced which used the default YOLO v5 model [@glenn_jocher_2020_4154370] as the basic model to begin transfer learning. Next, transfer learning was completed using the entire TACO dataset to import the image classes and annotations in the YOLO v5 model.
-# Figures
+## Limitations
+From our experience, the accuracy of the model varies depending on the quality of the images and their settings. The algorithm is primarily trained on single pieces of trash in the image and the model will likely excel for that use case.
-Figures can be included like this:
-![Caption for example figure.\label{fig:example}](figure.png)
-and referenced from text using \autoref{fig:example}.
+# Availability
+Trash AI is hosted on the web at www.trashai.org. The source code is available on GitHub at https://github.com/code4sac/trash-ai with an MIT (https://mit-license.org/) license. The source code can be run offline on any machine that can install Docker and Docker-compose (www.docker.com). Documentation is hosted by Code for Sacramento on GitHub and will be updated with each release. The image datasets shared to the tool need to be previewed before being shared with others due to security and moderation concerns.
-Figure sizes can be customized by adding an optional second parameter:
-![Caption for example figure.](figure.png){ width=20% }
+# Future Goals
+This workflow is likely to be highly useful for a wide variety of computer vision applications and we hope that people reuse the code for applications beyond trash detection. We aim to increase the labeling of images by creating a user interface that allows users to improve the annotations that the model is currently predicting by manually restructuring the bounding boxes and relabeling the classes. We aim to work in collaboration with the TACO development team to improve our workflow integration to get the data that people share to our S3 bucket into the TACO training dataset (CITE) and trained model. Future models will expand the annotations to include the trash taxonomy (CITE) classes and add an option to choose between other models besides the current model.
# Acknowledgements
-
-We acknowledge contributions from Brigitta Sipocz, Syrtis Major, and Semyeong
-Oh, and support from Kathryn Johnston during the genesis of this project.
+Code for Sacramento led the development of the software tool. The Moore Institute advised on priorities and led the drafting of this manuscript. Let's Do It Foundation assisted with original products leading up to trash AI in the development of WADE AI. We acknowledge the work of the Code for Sacramento team, part of code for America, without whom this project would not have been possible and acknowledge the input of the California Water Monitoring Council Trash Monitoring Workgroup. We acknowledge financial support from McPike Zima Charitable Foundation.
# References
\ No newline at end of file
From 6e3ff73ca5eca53f8c27a0a1f63ec7a2667798d9 Mon Sep 17 00:00:00 2001
From: wincowgerDEV
Date: Mon, 5 Sep 2022 14:18:57 -0700
Subject: [PATCH 05/73] cleanup text
---
paper.bib | 45 ++++++++++++++++++++++++++++++++++++++++++++-
paper.md | 13 +++++++------
2 files changed, 51 insertions(+), 7 deletions(-)
diff --git a/paper.bib b/paper.bib
index 96ad061..d227164 100644
--- a/paper.bib
+++ b/paper.bib
@@ -1,5 +1,5 @@
-@software{glenn_jocher_2020_4154370,
+@software{Jocher:2020,
author = {Glenn Jocher and
Alex Stoken and
Jirka Borovec and
@@ -84,6 +84,49 @@ @misc{Waterboards:2018
}
+@ARTICLE{Hapich:2022,
+ title = "Trash Taxonomy Tool: harmonizing classification systems used to
+ describe trash in environments",
+ author = "Hapich, Hannah and Cowger, Win and Gray, Andrew and Tangri, Neil
+ and Hale, Tony and Magdy, Amr and Vermilye, Antoinette and Yu,
+ Walter and Ayres, Dick and Moore, Charles and Vermilye, John and
+ Singh, Samiksha and Haiman, Aaron N K and Youngblood, Kathryn and
+ Kang, Yunfan and McCauley, Margaret and Lok, Trevor and Moore,
+ Shelly and Baggs, Eric and Lippiatt, Sherry and Kohler, Peter and
+ Conley, Gary and Taing, Janna and Mock, Jeremiah",
+ abstract = "Despite global efforts to monitor, mitigate against, and prevent
+ trash (mismanaged solid waste) pollution, no harmonized trash
+ typology system has been widely adopted worldwide. This impedes
+ the merging of datasets and comparative analyses. We addressed
+ this problem by 1) assessing the state of trash typology and
+ comparability, 2) developing a standardized and harmonized
+ framework of relational tables and tools, and 3) informing
+ practitioners about challenges and potential solutions. We
+ analyzed 68 trash survey lists to assess similarities and
+ differences in classification. We created comprehensive
+ harmonized hierarchical tables and alias tables for item and
+ material classes. On average, the 68 survey lists had 20.8\% of
+ item classes in common and 29.9\% of material classes in common.
+ Multiple correspondence analysis showed that the 68 surveys were
+ not significantly different regarding organization type,
+ ecosystem focus, or substrate focus. We built the Trash Taxonomy
+ Tool (TTT) web-based application with query features and open
+ access at openanalysis.org/trashtaxonomy. The TTT can be applied
+ to improve, create, and compare trash surveys, and provides
+ practitioners with tools to integrate datasets and maximize
+ comparability. The use of TTT will ultimately facilitate
+ improvements in assessing trends across space and time,
+ identifying targets for mitigation, evaluating the effectiveness
+ of prevention measures, informing policymaking, and holding
+ producers responsible.",
+ journal = "Microplastics and Nanoplastics",
+ volume = 2,
+ number = 1,
+ pages = "15",
+ month = jun,
+ year = 2022
+}
+
@ARTICLE{Lynch:2018,
title = "{OpenLitterMap.com} -- Open Data on Plastic Pollution with
diff --git a/paper.md b/paper.md
index 5d084d8..116525a 100644
--- a/paper.md
+++ b/paper.md
@@ -58,19 +58,20 @@ Figure sizes can be customized by adding an optional second parameter:
# Method
-## AI Training
-An algorithm trained on the TACO (http://tacodataset.org/) dataset and using YOLO 5 (pytorch.org) which analyzes the images in the browser and provides the prediction of the model as a graphical output. The raw data from the model and labeled images can be downloaded in a batch download to expedite analyses. Any data uploaded to the platform is automatically saved to an S3 bucket which we can use to improve the model over time.
+## Workflow Overview
+Trash AI is trained on the [TACO dataset](http://tacodataset.org/) using [YOLO 5](https://pytorch.org). Trash AI stores images in [IndexedDB](https://developer.mozilla.org/en-US/docs/Web/API/IndexedDB_API) to keep the data primarily browser side and uses [tensorflow.js](https://www.tensorflow.org/js) to keep analysis browser side too. When images are uploaded to the browser, Trash AI provides the prediction of the model as a graphical output. The raw data from the model and labeled images can be downloaded in a batch download to expedite analyses. Any data uploaded to the platform is automatically saved to an [S3 bucket](https://aws.amazon.com/s3/) which we can use to improve the model over time.
-The AI model was developed starting with the TACO dataset which was available within a Jupyter Notebook on Kaggle (https://www.kaggle.com/datasets/kneroma/tacotrashdataset). An example notebook was referenced which used the default YOLO v5 model `[@glenn_jocher_2020_4154370]` as the basic model to begin transfer learning. Next, transfer learning was completed using the entire TACO dataset to import the image classes and annotations in the YOLO v5 model.
+## AI Training
+The AI model was developed starting with the TACO dataset which was available with a complementary Jupyter Notebook on Kaggle (https://www.kaggle.com/datasets/kneroma/tacotrashdataset). An example notebook was referenced which used the default YOLO v5 model `[@Jocher:2020]` as the basic model to begin transfer learning. Next, transfer learning was completed using the entire TACO dataset to import the image classes and annotations in the YOLO v5 model.
## Limitations
-From our experience, the accuracy of the model varies depending on the quality of the images and their settings. The algorithm is primarily trained on single pieces of trash in the image and the model will likely excel for that use case.
+From our experience, the accuracy of the model varies depending on the quality of the images and their context/background. Trash is a nuanced classification because the same object in different settings will not be considered trash (e.g. a drink bottle on someone's desk vs in the forest laying on the ground). This and other complexities to trash classification make a general trash AI a challenging (yet worthwhile) long term endeavor. The algorithm is primarily trained on single pieces of trash in the image with the trash laying on the ground and the model will likely excel for that use case currently.
# Availability
-Trash AI is hosted on the web at www.trashai.org. The source code is available on github https://github.com/code4sac/trash-ai with an MIT (https://mit-license.org/) license. The source code can be run offline on any machine that can install Docker and Docker-compose (www.docker.com)). Documentation is hosted by Code for Sacramento on github and will be updated with each release. The image datasets shared to the tool need to be previewed before being shared with others due to security and moderation concerns.
+Trash AI is hosted on the web at www.trashai.org. The source code is [available on GitHub](https://github.com/code4sac/trash-ai) with an [MIT license](https://mit-license.org/). The source code can be run offline on any machine that can install [Docker and Docker-compose](https://www.docker.com). [Documentation](https://github.com/code4sac/trash-ai#ai-for-litter-detection-web-application) is maintained by Code for Sacramento on GitHub and will be updated with each release. The image datasets shared to the tool are in an S3 Bucket that needs to be reviewed before being shared with others due to security and moderation concerns but can be acquired by [contacting the repo maintainers](https://github.com/code4sac/trash-ai/graphs/contributors).
# Future Goals
-This workflow is likely to be highly useful for a wide variety of computer vision applications and we hope that people reuse the code for applications beyond trash detection. We aim to increase the labeling of images by creating a user interface that allows users to improve the annotations that the model is currently predicting by manually restructuring the bounding boxes and relabeling the classes. We aim to work in collaboration with the TACO development team to improve our workflow integration to get the data that people share to our S3 bucket into the TACO training dataset (CITE) and trained model. Future models will expand the annotations to include the trash taxonomy (CITE) classes and add an option to choose between other models besides the current model.
+This workflow is likely to be highly useful for a wide variety of computer vision applications and we hope that people reuse the code for applications beyond trash detection. We aim to increase the labeling of images by creating a user interface that allows users to improve the annotations that the model is currently predicting by manually restructuring the bounding boxes and relabeling the classes. We aim to work in collaboration with the TACO development team to improve our workflow integration to get the data that people share to our S3 bucket into the [TACO training dataset](http://tacodataset.org/) and trained model. Future models will expand the annotations to include the Trash Taxonomy `[@Hapich:2022]` classes and add an option to choose between other models besides the current model.
# Acknowledgements
Code for Sacramento led the development of the software tool. The Moore Institute advised on priorities and led the drafting of this manuscript. Let's Do It Foundation assisted with original products leading up to trash AI in the development of WADE AI. We acknowledge the work of the Code for Sacramento team, part of code for America, without whom this project would not have been possible and acknowledge the input of the California Water Monitoring Council Trash Monitoring Workgroup. We acknowledge financial support from McPike Zima Charitable Foundation.
From d249780c3804b2c1f2558147e60dd939b4e0f630 Mon Sep 17 00:00:00 2001
From: wincowgerDEV
Date: Mon, 5 Sep 2022 14:26:23 -0700
Subject: [PATCH 06/73] cite
---
paper.md | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/paper.md b/paper.md
index 116525a..f3cc42a 100644
--- a/paper.md
+++ b/paper.md
@@ -33,7 +33,8 @@ affiliations:
index: 2
- name: California Department of Transportation, USA
index: 3
- - nam: Let's Do It Foundation, Estonia
+ - name: Let's Do It Foundation, Estonia
+ index: 4
date: 1 September 2022
bibliography: paper.bib
From e5a3fb27548fec0fef2ebb1161e7e82a7c48acf4 Mon Sep 17 00:00:00 2001
From: wincowgerDEV
Date: Mon, 5 Sep 2022 14:34:24 -0700
Subject: [PATCH 07/73] update readme
---
README.md | 17 ++++++++++-------
1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/README.md b/README.md
index e4857b1..f05204c 100644
--- a/README.md
+++ b/README.md
@@ -7,21 +7,24 @@
### Project Summary
-- Description: Develop web application where users can upload photos of street litter which will be labeled using computer vision to detect and categorize litter type.
-- Benefit: Enhance abilities of researchers to quickly label photos and categorize types.
+Trash AI is a web application where users can upload photos of litter which will be labeled using computer vision to detect and categorize litter in the image by type. Trash AI will enhance the abilities of researchers to quickly label trash in photos.
+
+## Deployment
+
+You can simply go to www.trashai.org to start using the tool or deploy it yourself. Current self-deployment options are local deployment with docker to remote on Amazon Web Services (AWS).
### [Local Development](./docs/localdev.md)
-Run the environment live with localstack, and docker
+Run the environment live with localstack and docker.
### [AWS Deployments](./docs/git-aws-account-setup.md)
-Instructions on bringing up a new deployment
+Instructions on bringing up a new AWS deployment.
-### [CI/CD - Github Actions](./docs/github-actions.md)
+#### [Continuous Integration and Continuous Delivery (CI/CD) - Github Actions](./docs/github-actions.md)
Mostly CD at this point.
-### [Github Actions AWS Deployment Role](./docs/github-actions-deployment-role.md)
+#### [Github Actions AWS Deployment Role](./docs/github-actions-deployment-role.md)
-Runs the complex stuff so you don't have to
+Runs the complex stuff so you don't have to.
From f24c5f69b77b6f3841ffac2540df73c110ba77ee Mon Sep 17 00:00:00 2001
From: wincowgerDEV
Date: Mon, 5 Sep 2022 14:38:16 -0700
Subject: [PATCH 08/73] contribute
---
README.md | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/README.md b/README.md
index f05204c..9502943 100644
--- a/README.md
+++ b/README.md
@@ -1,9 +1,9 @@
-## AI for Litter Detection (Web Application)
+# AI for Litter Detection (Web Application)
### Project Information
-- Sponsor: Win Cowger, UC Riverside - Trash Data Projects
-- Meeting Times: Wednesdays at 6:30pm (Weekly Brigade Meetings)
+- Sponsor: Win Cowger, [Moore Institute for Plastic Pollution Research](https://mooreplasticresearch.org/)
+- Meeting Times: Wednesdays at 6:30pm PT [Weekly Brigade Meetings](https://www.meetup.com/code4sac/)
### Project Summary
@@ -28,3 +28,6 @@ Mostly CD at this point.
#### [Github Actions AWS Deployment Role](./docs/github-actions-deployment-role.md)
Runs the complex stuff so you don't have to.
+
+## Contribute
+We welcome contributions of all kinds. To get started, open an [issue](https://github.com/code4sac/trash-ai/issues) or [pull request](https://github.com/code4sac/trash-ai/pulls).
From 31220025ce64875e7be1f26154eac69eae3d0f51 Mon Sep 17 00:00:00 2001
From: wincowgerDEV
Date: Mon, 5 Sep 2022 14:42:55 -0700
Subject: [PATCH 09/73] add website badge
---
README.md | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/README.md b/README.md
index 9502943..4d915f5 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,6 @@
# AI for Litter Detection (Web Application)
+[![Website](https://img.shields.io/badge/web)](https://www.trashai.org)
+
### Project Information
@@ -30,4 +32,4 @@ Mostly CD at this point.
Runs the complex stuff so you don't have to.
## Contribute
-We welcome contributions of all kinds. To get started, open an [issue](https://github.com/code4sac/trash-ai/issues) or [pull request](https://github.com/code4sac/trash-ai/pulls).
+We welcome contributions of all kinds. To get started, open an [issue](https://github.com/code4sac/trash-ai/issues) or [pull request](https://github.com/code4sac/trash-ai/pulls). Here are some ideas on [How to Contribute](https://opensource.guide/how-to-contribute/). Please adhere to this project's [Code of Conduct](https://www.contributor-covenant.org/version/2/1/code_of_conduct/).
From 968b75181d23fee8c8caa20f8022eb348c7085ed Mon Sep 17 00:00:00 2001
From: wincowgerDEV
Date: Mon, 5 Sep 2022 14:50:03 -0700
Subject: [PATCH 10/73] update web link
---
README.md | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/README.md b/README.md
index 4d915f5..7cdb900 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
# AI for Litter Detection (Web Application)
-[![Website](https://img.shields.io/badge/web)](https://www.trashai.org)
+[![Website](https://img.shields.io/badge/Web-TrashAI.org-blue)](https://www.trashai.org)
### Project Information
@@ -17,19 +17,19 @@ You can simply go to www.trashai.org to start using the tool or deploy it yourse
### [Local Development](./docs/localdev.md)
-Run the environment live with localstack and docker.
+- Run the environment live with localstack and docker.
-### [AWS Deployments](./docs/git-aws-account-setup.md)
+### [AWS Deployment](./docs/git-aws-account-setup.md)
-Instructions on bringing up a new AWS deployment.
+- Instructions on bringing up a new AWS deployment.
#### [Continuous Integration and Continuous Delivery (CI/CD) - Github Actions](./docs/github-actions.md)
-Mostly CD at this point.
+- Mostly CD at this point.
#### [Github Actions AWS Deployment Role](./docs/github-actions-deployment-role.md)
-Runs the complex stuff so you don't have to.
+- Runs the complex stuff so you don't have to.
## Contribute
We welcome contributions of all kinds. To get started, open an [issue](https://github.com/code4sac/trash-ai/issues) or [pull request](https://github.com/code4sac/trash-ai/pulls). Here are some ideas on [How to Contribute](https://opensource.guide/how-to-contribute/). Please adhere to this project's [Code of Conduct](https://www.contributor-covenant.org/version/2/1/code_of_conduct/).
From bf04070842d2f04776ae4dbdb02e935522f847ff Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Mon, 5 Sep 2022 14:51:09 -0700
Subject: [PATCH 11/73] Update README.md
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index 7cdb900..06bb06f 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-# AI for Litter Detection (Web Application)
+# Trash AI: Serverless image classification of trash (Web Application)
[![Website](https://img.shields.io/badge/Web-TrashAI.org-blue)](https://www.trashai.org)
From f053598cf7abeefdb1b42b730687806a35405016 Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Mon, 5 Sep 2022 14:51:41 -0700
Subject: [PATCH 12/73] Update README.md
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index 06bb06f..ad041e4 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-# Trash AI: Serverless image classification of trash (Web Application)
+# Trash AI: Web application for serverless image classification of trash
[![Website](https://img.shields.io/badge/Web-TrashAI.org-blue)](https://www.trashai.org)
From ef37cc0a17ddb77d4c0c28d57ae022a7b4cc22d3 Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Mon, 5 Sep 2022 14:57:22 -0700
Subject: [PATCH 13/73] add video link
---
README.md | 3 +++
1 file changed, 3 insertions(+)
diff --git a/README.md b/README.md
index ad041e4..015c746 100644
--- a/README.md
+++ b/README.md
@@ -11,6 +11,9 @@
Trash AI is a web application where users can upload photos of litter which will be labeled using computer vision to detect and categorize litter in the image by type. Trash AI will enhance the abilities of researchers to quickly label trash in photos.
+#### Demo
+[![image](https://user-images.githubusercontent.com/26821843/188515526-33e1196b-6830-4187-8fe4-e68b2bd4019e.png)](https://youtu.be/HHrjUpQynUM)
+
## Deployment
You can simply go to www.trashai.org to start using the tool or deploy it yourself. Current self-deployment options are local deployment with docker to remote on Amazon Web Services (AWS).
From 4f46bd0a90e41b9f6736f5ff0c06378fa9212bdb Mon Sep 17 00:00:00 2001
From: wincowgerDEV
Date: Mon, 5 Sep 2022 15:02:29 -0700
Subject: [PATCH 14/73] auto update
---
draft-pdf.yml | 23 +++++++++++++++++++++++
1 file changed, 23 insertions(+)
create mode 100644 draft-pdf.yml
diff --git a/draft-pdf.yml b/draft-pdf.yml
new file mode 100644
index 0000000..61ae864
--- /dev/null
+++ b/draft-pdf.yml
@@ -0,0 +1,23 @@
+on: [push]
+
+jobs:
+ paper:
+ runs-on: ubuntu-latest
+ name: Paper Draft
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v2
+ - name: Build draft PDF
+ uses: openjournals/openjournals-draft-action@master
+ with:
+ journal: joss
+ # This should be the path to the paper within your repo.
+ paper-path: paper.md
+ - name: Upload
+ uses: actions/upload-artifact@v1
+ with:
+ name: paper
+ # This is the output path where Pandoc will write the compiled
+ # PDF. Note, this should be the same directory as the input
+ # paper.md
+ path: paper.pdf
\ No newline at end of file
From 11f5eaa691d36d4b990f54853164982b6bf42891 Mon Sep 17 00:00:00 2001
From: wincowgerDEV
Date: Mon, 5 Sep 2022 15:04:40 -0700
Subject: [PATCH 15/73] utility files device specific
---
.gitignore | 2 ++
1 file changed, 2 insertions(+)
diff --git a/.gitignore b/.gitignore
index 7a178f1..678e1f1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -24,3 +24,5 @@ pyrightconfig.json
archive/
tmp/
.Rproj.user
+delete.bat
+.Rhistory
From f97c3d7fbfa2cc3c20c9fe0cbc7575ef8db4a042 Mon Sep 17 00:00:00 2001
From: wincowgerDEV
Date: Mon, 5 Sep 2022 15:11:47 -0700
Subject: [PATCH 16/73] move the pdf generator to workflows
---
draft-pdf.yml => .github/workflows/draft-pdf.yml | 0
1 file changed, 0 insertions(+), 0 deletions(-)
rename draft-pdf.yml => .github/workflows/draft-pdf.yml (100%)
diff --git a/draft-pdf.yml b/.github/workflows/draft-pdf.yml
similarity index 100%
rename from draft-pdf.yml
rename to .github/workflows/draft-pdf.yml
From 99cde810f54b341957e731ded981785b9fc1ff2a Mon Sep 17 00:00:00 2001
From: wincowgerDEV
Date: Mon, 5 Sep 2022 15:23:35 -0700
Subject: [PATCH 17/73] test error in paper.bib
---
paper.bib | 160 -----------------------------------------------
paperbibtest.bib | 153 ++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 153 insertions(+), 160 deletions(-)
create mode 100644 paperbibtest.bib
diff --git a/paper.bib b/paper.bib
index d227164..0991b16 100644
--- a/paper.bib
+++ b/paper.bib
@@ -40,166 +40,6 @@ @software{Jocher:2020
url = {https://doi.org/10.5281/zenodo.4154370}
}
-@article{van Lieshout:2020,
-author = {van Lieshout, Colin and van Oeveren, Kees and van Emmerik, Tim and Postma, Eric},
-title = {Automated River Plastic Monitoring Using Deep Learning and Cameras},
-journal = {Earth and Space Science},
-volume = {7},
-number = {8},
-pages = {e2019EA000960},
-keywords = {plastic pollution, object detection, automated monitoring, deep learning, artificial intelligence, river plastic},
-doi = {https://doi.org/10.1029/2019EA000960},
-url = {https://agupubs.onlinelibrary.wiley.com/doi/abs/10.1029/2019EA000960},
-eprint = {https://agupubs.onlinelibrary.wiley.com/doi/pdf/10.1029/2019EA000960},
-note = {e2019EA000960 10.1029/2019EA000960},
-abstract = {Abstract Quantifying plastic pollution on surface water is essential to understand and mitigate the impact of plastic pollution to the environment. Current monitoring methods such as visual counting are labor intensive. This limits the feasibility of scaling to long-term monitoring at multiple locations. We present an automated method for monitoring plastic pollution that overcomes this limitation. Floating macroplastics are detected from images of the water surface using deep learning. We perform an experimental evaluation of our method using images from bridge-mounted cameras at five different river locations across Jakarta, Indonesia. The four main results of the experimental evaluation are as follows. First, we realize a method that obtains a reliable estimate of plastic density (68.7\% precision). Our monitoring method successfully distinguishes plastics from environmental elements, such as water surface reflection and organic waste. Second, when trained on one location, the method generalizes well to new locations with relatively similar conditions without retraining (≈50\% average precision). Third, generalization to new locations with considerably different conditions can be boosted by retraining on only 50 objects of the new location (improving precision from ≈20\% to ≈42\%). Fourth, our method matches visual counting methods and detects ≈35\% more plastics, even more so during periods of plastic transport rates of above 10 items per meter per minute. Taken together, these results demonstrate that our method is a promising way of monitoring plastic pollution. By extending the variety of the data set the monitoring method can be readily applied at a larger scale.},
-year = {2020}
-}
-
-@misc{WADE AI:2020,
- author = {K. Kerge, W. Cowger, K. Haamer, K. Ehala, K. Kivistik, T. Tammiste, M. Vares},
- title = {WADE AI Trash Detection},
- year = {2020},
- publisher = {GitHub},
- journal = {GitHub repository},
- url = {https://github.com/letsdoitworld/wade-ai}
-}
-
-@misc{Wuu:2018,
- author = {S. Wuu},
- title = {Litter Detection Tensorflow},
- year = {2018},
- publisher = {GitHub},
- journal = {GitHub repository},
- url = {https://github.com/isaychris/litter-detection-tensorflow}
-}
-
-@misc{Waterboards:2018,
- author = {California State Water Resources Control Board},
- title = {Trash Tracker},
- year = {2018},
- publisher = {GitHub},
- journal = {GitHub repository},
- url = {https://github.com/CAWaterBoardDataCenter/Trash-Tracker}
-}
-
-
-@ARTICLE{Hapich:2022,
- title = "Trash Taxonomy Tool: harmonizing classification systems used to
- describe trash in environments",
- author = "Hapich, Hannah and Cowger, Win and Gray, Andrew and Tangri, Neil
- and Hale, Tony and Magdy, Amr and Vermilye, Antoinette and Yu,
- Walter and Ayres, Dick and Moore, Charles and Vermilye, John and
- Singh, Samiksha and Haiman, Aaron N K and Youngblood, Kathryn and
- Kang, Yunfan and McCauley, Margaret and Lok, Trevor and Moore,
- Shelly and Baggs, Eric and Lippiatt, Sherry and Kohler, Peter and
- Conley, Gary and Taing, Janna and Mock, Jeremiah",
- abstract = "Despite global efforts to monitor, mitigate against, and prevent
- trash (mismanaged solid waste) pollution, no harmonized trash
- typology system has been widely adopted worldwide. This impedes
- the merging of datasets and comparative analyses. We addressed
- this problem by 1) assessing the state of trash typology and
- comparability, 2) developing a standardized and harmonized
- framework of relational tables and tools, and 3) informing
- practitioners about challenges and potential solutions. We
- analyzed 68 trash survey lists to assess similarities and
- differences in classification. We created comprehensive
- harmonized hierarchical tables and alias tables for item and
- material classes. On average, the 68 survey lists had 20.8\% of
- item classes in common and 29.9\% of material classes in common.
- Multiple correspondence analysis showed that the 68 surveys were
- not significantly different regarding organization type,
- ecosystem focus, or substrate focus. We built the Trash Taxonomy
- Tool (TTT) web-based application with query features and open
- access at openanalysis.org/trashtaxonomy. The TTT can be applied
- to improve, create, and compare trash surveys, and provides
- practitioners with tools to integrate datasets and maximize
- comparability. The use of TTT will ultimately facilitate
- improvements in assessing trends across space and time,
- identifying targets for mitigation, evaluating the effectiveness
- of prevention measures, informing policymaking, and holding
- producers responsible.",
- journal = "Microplastics and Nanoplastics",
- volume = 2,
- number = 1,
- pages = "15",
- month = jun,
- year = 2022
-}
-
-
-@ARTICLE{Lynch:2018,
- title = "{OpenLitterMap.com} -- Open Data on Plastic Pollution with
- Blockchain Rewards (Littercoin)",
- author = "Lynch, Se{\'a}n",
- abstract = "OpenLitterMap rewards users with Littercoin for producing open
- data on litter. Open data on the geospatial characteristics of
- litter provide means of invoking and evaluating responses to
- plastic pollution. OpenLitterMap currently works as a web app on
- all devices with native mobile apps in development. The stack
- includes the integration of the Laravel PHP Framework on the
- backend; Vue for frontend reactivity; NativeScript-Vue for mobile
- apps; Bulma for CSS; Leaflet for web-mapping; Turf.js for
- geospatial analysis; the Ethereum Blockchain for tokenization;
- Stripe; ChartJS; AWS; and more. Anywhere from a single cigarette
- butt to the contents of an entire beach or street clean can be
- logged in a single geotagged photo. Alternatively, a simple index
- may be used if litter is incalculable. The open data includes an
- increasing 100+ pre-defined types of litter; 20+ corporate
- brands; verification status; coordinates; timestamp; phone model;
- the latest OpenStreetMap address at each location; and the litter
- presence as a Boolean. To date, 100\% of all submitted data (~
- 8200 photos, ~ 28,000 litter from over 150 contributors) has been
- manually verified which is being used to develop machine learning
- algorithms.",
- journal = "Open Geospatial Data, Software and Standards",
- volume = 3,
- number = 1,
- pages = "6",
- month = jun,
- year = 2018
-}
-
-@article{Majchrowska:2022,
-title = {Deep learning-based waste detection in natural and urban environments},
-journal = {Waste Management},
-volume = {138},
-pages = {274-284},
-year = {2022},
-issn = {0956-053X},
-doi = {https://doi.org/10.1016/j.wasman.2021.12.001},
-url = {https://www.sciencedirect.com/science/article/pii/S0956053X21006474},
-author = {Sylwia Majchrowska and Agnieszka Mikołajczyk and Maria Ferlin and Zuzanna Klawikowska and Marta A. Plantykow and Arkadiusz Kwasigroch and Karol Majek},
-keywords = {Object detection, Semi-supervised learning, Waste classification benchmarks, Waste detection benchmarks, Waste localization, Waste recognition},
-abstract = {Waste pollution is one of the most significant environmental issues in the modern world. The importance of recycling is well known, both for economic and ecological reasons, and the industry demands high efficiency. Current studies towards automatic waste detection are hardly comparable due to the lack of benchmarks and widely accepted standards regarding the used metrics and data. Those problems are addressed in this article by providing a critical analysis of over ten existing waste datasets and a brief but constructive review of the existing Deep Learning-based waste detection approaches. This article collects and summarizes previous studies and provides the results of authors’ experiments on the presented datasets, all intended to create a first replicable baseline for litter detection. Moreover, new benchmark datasets detect-waste and classify-waste are proposed that are merged collections from the above-mentioned open-source datasets with unified annotations covering all possible waste categories: bio, glass, metal and plastic, non-recyclable, other, paper, and unknown. Finally, a two-stage detector for litter localization and classification is presented. EfficientDet-D2 is used to localize litter, and EfficientNet-B2 to classify the detected waste into seven categories. The classifier is trained in a semi-supervised fashion making the use of unlabeled images. The proposed approach achieves up to 70% of average precision in waste detection and around 75% of classification accuracy on the test dataset. The code and annotations used in the studies are publicly available online11https://github.com/wimlds-trojmiasto/detect-waste..}
-}
-
-@misc{Proença:2020,
- doi = {10.48550/ARXIV.2003.06975},
-
- url = {https://arxiv.org/abs/2003.06975},
-
- author = {Proença, Pedro F and Simões, Pedro},
-
- keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences},
-
- title = {TACO: Trash Annotations in Context for Litter Detection},
-
- publisher = {arXiv},
-
- year = {2020},
-
- copyright = {arXiv.org perpetual, non-exclusive license}
-}
-
-
-@TECHREPORT{Moore:2020,
- title = "California Trash Monitoring Methods and Assessments Playbook",
- author = "Moore, Shelly and Hale, Tony and Weisberg, Stephen B and
- Flores, Lorenzo and Kauhanen, Pete",
- institution = "San Francisco Estuary Institute",
- year = 2020
-}
diff --git a/paperbibtest.bib b/paperbibtest.bib
new file mode 100644
index 0000000..d145673
--- /dev/null
+++ b/paperbibtest.bib
@@ -0,0 +1,153 @@
+@article{van Lieshout:2020,
+author = {van Lieshout, Colin and van Oeveren, Kees and van Emmerik, Tim and Postma, Eric},
+title = {Automated River Plastic Monitoring Using Deep Learning and Cameras},
+journal = {Earth and Space Science},
+volume = {7},
+number = {8},
+pages = {e2019EA000960},
+keywords = {plastic pollution, object detection, automated monitoring, deep learning, artificial intelligence, river plastic},
+doi = {https://doi.org/10.1029/2019EA000960},
+url = {https://agupubs.onlinelibrary.wiley.com/doi/abs/10.1029/2019EA000960},
+eprint = {https://agupubs.onlinelibrary.wiley.com/doi/pdf/10.1029/2019EA000960},
+note = {e2019EA000960 10.1029/2019EA000960},
+abstract = {Abstract Quantifying plastic pollution on surface water is essential to understand and mitigate the impact of plastic pollution to the environment. Current monitoring methods such as visual counting are labor intensive. This limits the feasibility of scaling to long-term monitoring at multiple locations. We present an automated method for monitoring plastic pollution that overcomes this limitation. Floating macroplastics are detected from images of the water surface using deep learning. We perform an experimental evaluation of our method using images from bridge-mounted cameras at five different river locations across Jakarta, Indonesia. The four main results of the experimental evaluation are as follows. First, we realize a method that obtains a reliable estimate of plastic density (68.7\% precision). Our monitoring method successfully distinguishes plastics from environmental elements, such as water surface reflection and organic waste. Second, when trained on one location, the method generalizes well to new locations with relatively similar conditions without retraining (≈50\% average precision). Third, generalization to new locations with considerably different conditions can be boosted by retraining on only 50 objects of the new location (improving precision from ≈20\% to ≈42\%). Fourth, our method matches visual counting methods and detects ≈35\% more plastics, even more so during periods of plastic transport rates of above 10 items per meter per minute. Taken together, these results demonstrate that our method is a promising way of monitoring plastic pollution. By extending the variety of the data set the monitoring method can be readily applied at a larger scale.},
+year = {2020}
+}
+
+@misc{WADE AI:2020,
+ author = {K. Kerge and W. Cowger and K. Haamer and K. Ehala and K. Kivistik and T. Tammiste and M. Vares},
+ title = {WADE AI Trash Detection},
+ year = {2020},
+ publisher = {GitHub},
+ journal = {GitHub repository},
+ url = {https://github.com/letsdoitworld/wade-ai}
+}
+
+@misc{Wuu:2018,
+ author = {S. Wuu},
+ title = {Litter Detection Tensorflow},
+ year = {2018},
+ publisher = {GitHub},
+ journal = {GitHub repository},
+ url = {https://github.com/isaychris/litter-detection-tensorflow}
+}
+
+@misc{Waterboards:2018,
+ author = {California State Water Resources Control Board},
+ title = {Trash Tracker},
+ year = {2018},
+ publisher = {GitHub},
+ journal = {GitHub repository},
+ url = {https://github.com/CAWaterBoardDataCenter/Trash-Tracker}
+}
+
+
+@ARTICLE{Hapich:2022,
+ title = "Trash Taxonomy Tool: harmonizing classification systems used to
+ describe trash in environments",
+ author = "Hapich, Hannah and Cowger, Win and Gray, Andrew and Tangri, Neil
+ and Hale, Tony and Magdy, Amr and Vermilye, Antoinette and Yu,
+ Walter and Ayres, Dick and Moore, Charles and Vermilye, John and
+ Singh, Samiksha and Haiman, Aaron N K and Youngblood, Kathryn and
+ Kang, Yunfan and McCauley, Margaret and Lok, Trevor and Moore,
+ Shelly and Baggs, Eric and Lippiatt, Sherry and Kohler, Peter and
+ Conley, Gary and Taing, Janna and Mock, Jeremiah",
+ abstract = "Despite global efforts to monitor, mitigate against, and prevent
+ trash (mismanaged solid waste) pollution, no harmonized trash
+ typology system has been widely adopted worldwide. This impedes
+ the merging of datasets and comparative analyses. We addressed
+ this problem by 1) assessing the state of trash typology and
+ comparability, 2) developing a standardized and harmonized
+ framework of relational tables and tools, and 3) informing
+ practitioners about challenges and potential solutions. We
+ analyzed 68 trash survey lists to assess similarities and
+ differences in classification. We created comprehensive
+ harmonized hierarchical tables and alias tables for item and
+ material classes. On average, the 68 survey lists had 20.8\% of
+ item classes in common and 29.9\% of material classes in common.
+ Multiple correspondence analysis showed that the 68 surveys were
+ not significantly different regarding organization type,
+ ecosystem focus, or substrate focus. We built the Trash Taxonomy
+ Tool (TTT) web-based application with query features and open
+ access at openanalysis.org/trashtaxonomy. The TTT can be applied
+ to improve, create, and compare trash surveys, and provides
+ practitioners with tools to integrate datasets and maximize
+ comparability. The use of TTT will ultimately facilitate
+ improvements in assessing trends across space and time,
+ identifying targets for mitigation, evaluating the effectiveness
+ of prevention measures, informing policymaking, and holding
+ producers responsible.",
+ journal = "Microplastics and Nanoplastics",
+ volume = 2,
+ number = 1,
+ pages = "15",
+ month = jun,
+ year = 2022
+}
+
+
+@ARTICLE{Lynch:2018,
+ title = "{OpenLitterMap.com} -- Open Data on Plastic Pollution with
+ Blockchain Rewards (Littercoin)",
+ author = "Lynch, Se{\'a}n",
+ abstract = "OpenLitterMap rewards users with Littercoin for producing open
+ data on litter. Open data on the geospatial characteristics of
+ litter provide means of invoking and evaluating responses to
+ plastic pollution. OpenLitterMap currently works as a web app on
+ all devices with native mobile apps in development. The stack
+ includes the integration of the Laravel PHP Framework on the
+ backend; Vue for frontend reactivity; NativeScript-Vue for mobile
+ apps; Bulma for CSS; Leaflet for web-mapping; Turf.js for
+ geospatial analysis; the Ethereum Blockchain for tokenization;
+ Stripe; ChartJS; AWS; and more. Anywhere from a single cigarette
+ butt to the contents of an entire beach or street clean can be
+ logged in a single geotagged photo. Alternatively, a simple index
+ may be used if litter is incalculable. The open data includes an
+ increasing 100+ pre-defined types of litter; 20+ corporate
+ brands; verification status; coordinates; timestamp; phone model;
+ the latest OpenStreetMap address at each location; and the litter
+ presence as a Boolean. To date, 100\% of all submitted data (~
+ 8200 photos, ~ 28,000 litter from over 150 contributors) has been
+ manually verified which is being used to develop machine learning
+ algorithms.",
+ journal = "Open Geospatial Data, Software and Standards",
+ volume = 3,
+ number = 1,
+ pages = "6",
+ month = jun,
+ year = 2018
+}
+
+@article{Majchrowska:2022,
+title = {Deep learning-based waste detection in natural and urban environments},
+journal = {Waste Management},
+volume = {138},
+pages = {274-284},
+year = {2022},
+issn = {0956-053X},
+doi = {https://doi.org/10.1016/j.wasman.2021.12.001},
+url = {https://www.sciencedirect.com/science/article/pii/S0956053X21006474},
+author = {Sylwia Majchrowska and Agnieszka Mikołajczyk and Maria Ferlin and Zuzanna Klawikowska and Marta A. Plantykow and Arkadiusz Kwasigroch and Karol Majek},
+keywords = {Object detection, Semi-supervised learning, Waste classification benchmarks, Waste detection benchmarks, Waste localization, Waste recognition},
+abstract = {Waste pollution is one of the most significant environmental issues in the modern world. The importance of recycling is well known, both for economic and ecological reasons, and the industry demands high efficiency. Current studies towards automatic waste detection are hardly comparable due to the lack of benchmarks and widely accepted standards regarding the used metrics and data. Those problems are addressed in this article by providing a critical analysis of over ten existing waste datasets and a brief but constructive review of the existing Deep Learning-based waste detection approaches. This article collects and summarizes previous studies and provides the results of authors’ experiments on the presented datasets, all intended to create a first replicable baseline for litter detection. Moreover, new benchmark datasets detect-waste and classify-waste are proposed that are merged collections from the above-mentioned open-source datasets with unified annotations covering all possible waste categories: bio, glass, metal and plastic, non-recyclable, other, paper, and unknown. Finally, a two-stage detector for litter localization and classification is presented. EfficientDet-D2 is used to localize litter, and EfficientNet-B2 to classify the detected waste into seven categories. The classifier is trained in a semi-supervised fashion making the use of unlabeled images. The proposed approach achieves up to 70% of average precision in waste detection and around 75% of classification accuracy on the test dataset. The code and annotations used in the studies are publicly available online (https://github.com/wimlds-trojmiasto/detect-waste).}
+}
+
+@misc{Proença:2020,
+ doi = {10.48550/ARXIV.2003.06975},
+ url = {https://arxiv.org/abs/2003.06975},
+ author = {Proença, Pedro F and Simões, Pedro},
+ keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences},
+ title = {TACO: Trash Annotations in Context for Litter Detection},
+ publisher = {arXiv},
+ year = {2020},
+ copyright = {arXiv.org perpetual, non-exclusive license}
+}
+
+
+@TECHREPORT{Moore:2020,
+ title = "California Trash Monitoring Methods and Assessments Playbook",
+ author = "Moore, Shelly and Hale, Tony and Weisberg, Stephen B and
+ Flores, Lorenzo and Kauhanen, Pete",
+ institution = "San Francisco Estuary Institute",
+ year = 2020
+}
From b458d5541d89300d1fcf8013e0f4028c2cd690f1 Mon Sep 17 00:00:00 2001
From: wincowgerDEV
Date: Mon, 5 Sep 2022 15:31:49 -0700
Subject: [PATCH 18/73] test citation style
---
paper.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/paper.md b/paper.md
index f3cc42a..fc4f315 100644
--- a/paper.md
+++ b/paper.md
@@ -63,7 +63,7 @@ Figure sizes can be customized by adding an optional second parameter:
Trash AI is trained on the [TACO dataset](http://tacodataset.org/) using [YOLO 5](pytorch.org). Trash AI stores images in [IndexDB](https://developer.mozilla.org/en-US/docs/Web/API/IndexedDB_API) to keep the data primarily browser side and uses [tensorflow.js](https://www.tensorflow.org/js) to keep analysis browser side too. When images are uploaded to the browser, Trash AI provides the prediction of the model as a graphical output. The raw data from the model and labeled images can be downloaded in a batch download to expedite analyses. Any data uploaded to the platform is automatically saved to an [S3 bucket](https://aws.amazon.com/s3/) which we can use to improve the model over time.
## AI Training
-The AI model was developed starting with the TACO dataset which was available with a complimentary Jupyter Notebook on Kaggle (https://www.kaggle.com/datasets/kneroma/tacotrashdataset). An example notebook was referenced which used the default YOLO v5 model `[@Jocher:2020]` as the basic model to begin transfer learning. Next, transfer learning was completed using the entire TACO dataset to import the image classes and annotations in the YOLO v5 model.
+The AI model was developed starting with the TACO dataset which was available with a complimentary Jupyter Notebook on Kaggle (https://www.kaggle.com/datasets/kneroma/tacotrashdataset). An example notebook was referenced which used the default `YOLO v5 model` [@Jocher:2020] as the basic model to begin transfer learning. Next, transfer learning was completed using the entire TACO dataset to import the image classes and annotations in the YOLO v5 model.
## Limitations
From our experience, the accuracy of the model varies depending on the quality of the images and their context/background. Trash is a nuanced classification because the same object in different settings will not be considered trash (e.g. a drink bottle on someone's desk vs in the forest laying on the ground). This and other complexities to trash classification make a general trash AI a challenging (yet worthwhile) long term endeavor. The algorithm is primarily trained on single pieces of trash in the image with the trash laying on the ground and the model will likely excel for that use case currently.
From 1140afcb12a13e9bb214cdae054a8dc21e6b56cd Mon Sep 17 00:00:00 2001
From: wincowgerDEV
Date: Mon, 5 Sep 2022 15:36:18 -0700
Subject: [PATCH 19/73] add citations
---
paper.bib | 60 ++++++++++++++++++++++++++++++++++++++++++++++++
paper.md | 6 ++---
paperbibtest.bib | 58 +---------------------------------------------
3 files changed, 64 insertions(+), 60 deletions(-)
diff --git a/paper.bib b/paper.bib
index 0991b16..a622109 100644
--- a/paper.bib
+++ b/paper.bib
@@ -40,6 +40,66 @@ @software{Jocher:2020
url = {https://doi.org/10.5281/zenodo.4154370}
}
+@TECHREPORT{Moore:2020,
+ title = "California Trash Monitoring Methods and Assessments Playbook",
+ author = "Moore, Shelly and Hale, Tony and Weisberg, Stephen B and
+ Flores, Lorenzo and Kauhanen, Pete",
+ institution = "San Francisco Estuary Institute",
+ year = 2020
+}
+
+@ARTICLE{Hapich:2022,
+ title = "Trash Taxonomy Tool: harmonizing classification systems used to
+ describe trash in environments",
+ author = "Hapich, Hannah and Cowger, Win and Gray, Andrew and Tangri, Neil
+ and Hale, Tony and Magdy, Amr and Vermilye, Antoinette and Yu,
+ Walter and Ayres, Dick and Moore, Charles and Vermilye, John and
+ Singh, Samiksha and Haiman, Aaron N K and Youngblood, Kathryn and
+ Kang, Yunfan and McCauley, Margaret and Lok, Trevor and Moore,
+ Shelly and Baggs, Eric and Lippiatt, Sherry and Kohler, Peter and
+ Conley, Gary and Taing, Janna and Mock, Jeremiah",
+ abstract = "Despite global efforts to monitor, mitigate against, and prevent
+ trash (mismanaged solid waste) pollution, no harmonized trash
+ typology system has been widely adopted worldwide. This impedes
+ the merging of datasets and comparative analyses. We addressed
+ this problem by 1) assessing the state of trash typology and
+ comparability, 2) developing a standardized and harmonized
+ framework of relational tables and tools, and 3) informing
+ practitioners about challenges and potential solutions. We
+ analyzed 68 trash survey lists to assess similarities and
+ differences in classification. We created comprehensive
+ harmonized hierarchical tables and alias tables for item and
+ material classes. On average, the 68 survey lists had 20.8\% of
+ item classes in common and 29.9\% of material classes in common.
+ Multiple correspondence analysis showed that the 68 surveys were
+ not significantly different regarding organization type,
+ ecosystem focus, or substrate focus. We built the Trash Taxonomy
+ Tool (TTT) web-based application with query features and open
+ access at openanalysis.org/trashtaxonomy. The TTT can be applied
+ to improve, create, and compare trash surveys, and provides
+ practitioners with tools to integrate datasets and maximize
+ comparability. The use of TTT will ultimately facilitate
+ improvements in assessing trends across space and time,
+ identifying targets for mitigation, evaluating the effectiveness
+ of prevention measures, informing policymaking, and holding
+ producers responsible.",
+ journal = "Microplastics and Nanoplastics",
+ volume = 2,
+ number = 1,
+ pages = "15",
+ month = jun,
+ year = 2022
+}
+
+@misc{Waterboards:2018,
+ author = {California State Water Resources Control Board},
+ title = {Trash Tracker},
+ year = {2018},
+ publisher = {GitHub},
+ journal = {GitHub repository},
+ url = {https://github.com/CAWaterBoardDataCenter/Trash-Tracker}
+}
+
diff --git a/paper.md b/paper.md
index fc4f315..bed916a 100644
--- a/paper.md
+++ b/paper.md
@@ -46,7 +46,7 @@ Although computer vision classification routines have been created for trash, th
# Statement of need
-The trash in the environment is a widespread problem that is difficult to measure. Classical measurement techniques require surveyors with pen and paper to manually quantify every piece of trash at a site. This method is time-consuming. Scientists are actively trying to address this issue by using imaging to better understand the prevalence and distribution of trash in an efficient yet effective manner `[@Majchrowska:2022; @Proença:2020; @Moore:2020; @van Lieshout:2020; @WADE AI:2020; @Lynch:2018; @Wuu:2018; @Waterboards:2018]`. An app-based reporting of trash using cell phones, laptops, and other devices has been a valuable solution `[@Lynch:2018]`. Applications for AI in detecting trash currently include: images from bridges `[@van Lieshout:2020]`, drone imaging `[@Moore:2020]`, cameras on street sweepers `[@Waterboards:2018]`, and cell phone app based reporting of trash `[@Lynch:2018]`. Although there are many artificial intelligence algorithms developed for trash classification, none are readily accessible to the average data scientist. The primary limitation is that artificial intelligence (AI) algorithms are primarily run through programming languages (not graphic user interfaces), difficult to deploy without AI expertise, and often live on a server (which costs money to host). New developments in browser-side AI (e.g. tensorflow.js) and serverless architecture (e.g. AWS Lambda) have created the opportunity to have browser-side artificial intelligence in a web GUI alleviating both obstacles. We present Trash AI, an open source service for making computer vision available to anyone with a web browser and images of trash.
+The trash in the environment is a widespread problem that is difficult to measure. Classical measurement techniques require surveyors with pen and paper to manually quantify every piece of trash at a site. This method is time-consuming. Scientists are actively trying to address this issue by using imaging to better understand the prevalence and distribution of trash in an efficient yet effective manner `[@Majchrowska:2022; @Proença:2020; @Moore:2020; @van Lieshout:2020; @WADE AI:2020; @Lynch:2018; @Wuu:2018; @Waterboards:2018]`. An app-based reporting of trash using cell phones, laptops, and other devices has been a valuable solution `[@Lynch:2018]`. Applications for AI in detecting trash currently include: images from bridges `[@van Lieshout:2020]`, `drone imaging` [@Moore:2020], cameras on `street sweepers` [@Waterboards:2018], and cell phone app based reporting of trash `[@Lynch:2018]`. Although there are many artificial intelligence algorithms developed for trash classification, none are readily accessible to the average data scientist. The primary limitation is that artificial intelligence (AI) algorithms are primarily run through programming languages (not graphic user interfaces), difficult to deploy without AI expertise, and often live on a server (which costs money to host). New developments in browser-side AI (e.g. tensorflow.js) and serverless architecture (e.g. AWS Lambda) have created the opportunity to have browser-side artificial intelligence in a web GUI alleviating both obstacles. We present Trash AI, an open source service for making computer vision available to anyone with a web browser and images of trash.
# Example
Video
@@ -63,7 +63,7 @@ Figure sizes can be customized by adding an optional second parameter:
Trash AI is trained on the [TACO dataset](http://tacodataset.org/) using [YOLO 5](pytorch.org). Trash AI stores images in [IndexDB](https://developer.mozilla.org/en-US/docs/Web/API/IndexedDB_API) to keep the data primarily browser side and uses [tensorflow.js](https://www.tensorflow.org/js) to keep analysis browser side too. When images are uploaded to the browser, Trash AI provides the prediction of the model as a graphical output. The raw data from the model and labeled images can be downloaded in a batch download to expedite analyses. Any data uploaded to the platform is automatically saved to an [S3 bucket](https://aws.amazon.com/s3/) which we can use to improve the model over time.
## AI Training
-The AI model was developed starting with the TACO dataset which was available with a complimentary Jupyter Notebook on Kaggle (https://www.kaggle.com/datasets/kneroma/tacotrashdataset). An example notebook was referenced which used the default `YOLO v5 model` [@Jocher:2020] as the basic model to begin transfer learning. Next, transfer learning was completed using the entire TACO dataset to import the image classes and annotations in the YOLO v5 model.
+The AI model was developed starting with the TACO dataset which was available with a complimentary Jupyter Notebook on [Kaggle](https://www.kaggle.com/datasets/kneroma/tacotrashdataset). An example notebook was referenced which used the default YOLO v5 model [@Jocher:2020] as the basic model to begin transfer learning. Next, transfer learning was completed using the entire TACO dataset to import the image classes and annotations in the YOLO v5 model.
## Limitations
From our experience, the accuracy of the model varies depending on the quality of the images and their context/background. Trash is a nuanced classification because the same object in different settings will not be considered trash (e.g. a drink bottle on someone's desk vs in the forest laying on the ground). This and other complexities to trash classification make a general trash AI a challenging (yet worthwhile) long term endeavor. The algorithm is primarily trained on single pieces of trash in the image with the trash laying on the ground and the model will likely excel for that use case currently.
@@ -72,7 +72,7 @@ From our experience, the accuracy of the model varies depending on the quality o
Trash AI is hosted on the web at www.trashai.org. The source code is [available on github](https://github.com/code4sac/trash-ai) with an [MIT license](https://mit-license.org/). The source code can be run offline on any machine that can install [Docker and Docker-compose](www.docker.com). [Documentation](https://github.com/code4sac/trash-ai#ai-for-litter-detection-web-application) is maintained by Code for Sacramento on Github and will be updated with each release. The image datasets shared to the tool are in an S3 Bucket that needs to be reviewed before being shared with others due to security and moderation concerns but can be acquired by [contacting the repo maintainers](https://github.com/code4sac/trash-ai/graphs/contributors).
# Future Goals
-This workflow is likely to be highly useful for a wide variety of computer vision applications and we hope that people reuse the code for applications beyond trash detection. We aim to increase the labeling of images by creating a user interface that allows users to improve the annotations that the model is currently predicting by manually restructuring the bounding boxes and relabeling the classes. We aim to work in collaboration with the TACO development team to improve our workflow integration to get the data that people share to our S3 bucket into the [TACO training dataset](http://tacodataset.org/) and trained model. Future models will expand the annotations to include the Trash Taxonomy `[@Hapich:2022]` classes and add an option to choose between other models besides the current model.
+This workflow is likely to be highly useful for a wide variety of computer vision applications and we hope that people reuse the code for applications beyond trash detection. We aim to increase the labeling of images by creating a user interface that allows users to improve the annotations that the model is currently predicting by manually restructuring the bounding boxes and relabeling the classes. We aim to work in collaboration with the TACO development team to improve our workflow integration to get the data that people share to our S3 bucket into the [TACO training dataset](http://tacodataset.org/) and trained model. Future models will expand the annotations to include the Trash Taxonomy [@Hapich:2022] classes and add an option to choose between other models besides the current model.
# Acknowledgements
Code for Sacramento led the development of the software tool. The Moore Institute advised on priorities and led the drafting of this manuscript. Let's Do It Foundation assisted with original products leading up to trash AI in the development of WADE AI. We acknowledge the work of the Code for Sacramento team, part of code for America, without whom this project would not have been possible and acknowledge the input of the California Water Monitoring Council Trash Monitoring Workgroup. We acknowledge financial support from McPike Zima Charitable Foundation.
diff --git a/paperbibtest.bib b/paperbibtest.bib
index d145673..3870a12 100644
--- a/paperbibtest.bib
+++ b/paperbibtest.bib
@@ -32,58 +32,9 @@ @misc{Wuu:2018
url = {https://github.com/isaychris/litter-detection-tensorflow}
}
-@misc{Waterboards:2018,
- author = {California State Water Resources Control Board},
- title = {Trash Tracker},
- year = {2018},
- publisher = {GitHub},
- journal = {GitHub repository},
- url = {https://github.com/CAWaterBoardDataCenter/Trash-Tracker}
-}
-@ARTICLE{Hapich:2022,
- title = "Trash Taxonomy Tool: harmonizing classification systems used to
- describe trash in environments",
- author = "Hapich, Hannah and Cowger, Win and Gray, Andrew and Tangri, Neil
- and Hale, Tony and Magdy, Amr and Vermilye, Antoinette and Yu,
- Walter and Ayres, Dick and Moore, Charles and Vermilye, John and
- Singh, Samiksha and Haiman, Aaron N K and Youngblood, Kathryn and
- Kang, Yunfan and McCauley, Margaret and Lok, Trevor and Moore,
- Shelly and Baggs, Eric and Lippiatt, Sherry and Kohler, Peter and
- Conley, Gary and Taing, Janna and Mock, Jeremiah",
- abstract = "Despite global efforts to monitor, mitigate against, and prevent
- trash (mismanaged solid waste) pollution, no harmonized trash
- typology system has been widely adopted worldwide. This impedes
- the merging of datasets and comparative analyses. We addressed
- this problem by 1) assessing the state of trash typology and
- comparability, 2) developing a standardized and harmonized
- framework of relational tables and tools, and 3) informing
- practitioners about challenges and potential solutions. We
- analyzed 68 trash survey lists to assess similarities and
- differences in classification. We created comprehensive
- harmonized hierarchical tables and alias tables for item and
- material classes. On average, the 68 survey lists had 20.8\% of
- item classes in common and 29.9\% of material classes in common.
- Multiple correspondence analysis showed that the 68 surveys were
- not significantly different regarding organization type,
- ecosystem focus, or substrate focus. We built the Trash Taxonomy
- Tool (TTT) web-based application with query features and open
- access at openanalysis.org/trashtaxonomy. The TTT can be applied
- to improve, create, and compare trash surveys, and provides
- practitioners with tools to integrate datasets and maximize
- comparability. The use of TTT will ultimately facilitate
- improvements in assessing trends across space and time,
- identifying targets for mitigation, evaluating the effectiveness
- of prevention measures, informing policymaking, and holding
- producers responsible.",
- journal = "Microplastics and Nanoplastics",
- volume = 2,
- number = 1,
- pages = "15",
- month = jun,
- year = 2022
-}
+
@ARTICLE{Lynch:2018,
@@ -144,10 +95,3 @@ @misc{Proença:2020
}
-@TECHREPORT{Moore:2020,
- title = "California Trash Monitoring Methods and Assessments Playbook",
- author = "Moore, Shelly and Hale, Tony and Weisberg, Stephen B and
- Flores, Lorenzo and Kauhanen, Pete",
- institution = "San Francisco Estuary Institute",
- year = 2020
-}
From b8cbe585901aab4732d6a8df37e7cab20382b551 Mon Sep 17 00:00:00 2001
From: wincowgerDEV
Date: Mon, 5 Sep 2022 15:43:36 -0700
Subject: [PATCH 20/73] add a few more citations
---
paper.bib | 49 ++++++++++++++++++++++++++++++++++++++++++++-
paper.md | 2 +-
paperbibtest.bib | 52 +-----------------------------------------------
3 files changed, 50 insertions(+), 53 deletions(-)
diff --git a/paper.bib b/paper.bib
index a622109..15dac2b 100644
--- a/paper.bib
+++ b/paper.bib
@@ -92,7 +92,7 @@ @ARTICLE{Hapich:2022
}
@misc{Waterboards:2018,
- author = {California State Water Resources Control Board},
+ author = {Waterboards},
title = {Trash Tracker},
year = {2018},
publisher = {GitHub},
@@ -100,6 +100,53 @@ @misc{Waterboards:2018
url = {https://github.com/CAWaterBoardDataCenter/Trash-Tracker}
}
+@article{van Lieshout:2020,
+ author = {van Lieshout, Colin and van Oeveren, Kees and van Emmerik, Tim and Postma, Eric},
+ title = {Automated River Plastic Monitoring Using Deep Learning and Cameras},
+ journal = {Earth and Space Science},
+ volume = {7},
+ number = {8},
+ pages = {e2019EA000960},
+ keywords = {plastic pollution, object detection, automated monitoring, deep learning, artificial intelligence, river plastic},
+ doi = {https://doi.org/10.1029/2019EA000960},
+ url = {https://agupubs.onlinelibrary.wiley.com/doi/abs/10.1029/2019EA000960},
+ eprint = {https://agupubs.onlinelibrary.wiley.com/doi/pdf/10.1029/2019EA000960},
+ note = {e2019EA000960 10.1029/2019EA000960},
+ abstract = {Abstract Quantifying plastic pollution on surface water is essential to understand and mitigate the impact of plastic pollution to the environment. Current monitoring methods such as visual counting are labor intensive. This limits the feasibility of scaling to long-term monitoring at multiple locations. We present an automated method for monitoring plastic pollution that overcomes this limitation. Floating macroplastics are detected from images of the water surface using deep learning. We perform an experimental evaluation of our method using images from bridge-mounted cameras at five different river locations across Jakarta, Indonesia. The four main results of the experimental evaluation are as follows. First, we realize a method that obtains a reliable estimate of plastic density (68.7\% precision). Our monitoring method successfully distinguishes plastics from environmental elements, such as water surface reflection and organic waste. Second, when trained on one location, the method generalizes well to new locations with relatively similar conditions without retraining (≈50\% average precision). Third, generalization to new locations with considerably different conditions can be boosted by retraining on only 50 objects of the new location (improving precision from ≈20\% to ≈42\%). Fourth, our method matches visual counting methods and detects ≈35\% more plastics, even more so during periods of plastic transport rates of above 10 items per meter per minute. Taken together, these results demonstrate that our method is a promising way of monitoring plastic pollution. By extending the variety of the data set the monitoring method can be readily applied at a larger scale.},
+ year = {2020}
+}
+
+@ARTICLE{Lynch:2018,
+ title = "{OpenLitterMap.com} -- Open Data on Plastic Pollution with
+ Blockchain Rewards (Littercoin)",
+ author = "Lynch, Se{\'a}n",
+ abstract = "OpenLitterMap rewards users with Littercoin for producing open
+ data on litter. Open data on the geospatial characteristics of
+ litter provide means of invoking and evaluating responses to
+ plastic pollution. OpenLitterMap currently works as a web app on
+ all devices with native mobile apps in development. The stack
+ includes the integration of the Laravel PHP Framework on the
+ backend; Vue for frontend reactivity; NativeScript-Vue for mobile
+ apps; Bulma for CSS; Leaflet for web-mapping; Turf.js for
+ geospatial analysis; the Ethereum Blockchain for tokenization;
+ Stripe; ChartJS; AWS; and more. Anywhere from a single cigarette
+ butt to the contents of an entire beach or street clean can be
+ logged in a single geotagged photo. Alternatively, a simple index
+ may be used if litter is incalculable. The open data includes an
+ increasing 100+ pre-defined types of litter; 20+ corporate
+ brands; verification status; coordinates; timestamp; phone model;
+ the latest OpenStreetMap address at each location; and the litter
+ presence as a Boolean. To date, 100\% of all submitted data (~
+ 8200 photos, ~ 28,000 litter from over 150 contributors) has been
+ manually verified which is being used to develop machine learning
+ algorithms.",
+ journal = "Open Geospatial Data, Software and Standards",
+ volume = 3,
+ number = 1,
+ pages = "6",
+ month = jun,
+ year = 2018
+}
diff --git a/paper.md b/paper.md
index bed916a..8a114e0 100644
--- a/paper.md
+++ b/paper.md
@@ -46,7 +46,7 @@ Although computer vision classification routines have been created for trash, th
# Statement of need
-The trash in the environment is a widespread problem that is difficult to measure. Classical measurement techniques require surveyors with pen and paper to manually quantify every piece of trash at a site. This method is time-consuming. Scientists are actively trying to address this issue by using imaging to better understand the prevalence and distribution of trash in an efficient yet effective manner `[@Majchrowska:2022; @Proença:2020; @Moore:2020; @van Lieshout:2020; @WADE AI:2020; @Lynch:2018; @Wuu:2018; @Waterboards:2018]`. An app-based reporting of trash using cell phones, laptops, and other devices has been a valuable solution `[@Lynch:2018]`. Applications for AI in detecting trash currently include: images from bridges `[@van Lieshout:2020]`, `drone imaging` [@Moore:2020], cameras on `street sweepers` [@Waterboards:2018], and cell phone app based reporting of trash `[@Lynch:2018]`. Although there are many artificial intelligence algorithms developed for trash classification, none are readily accessible to the average data scientist. The primary limitation is that artificial intelligence (AI) algorithms are primarily run through programming languages (not graphic user interfaces), difficult to deploy without AI expertise, and often live on a server (which costs money to host). New developments in browser-side AI (e.g. tensorflow.js) and serverless architecture (e.g. AWS Lambda) have created the opportunity to have browser-side artificial intelligence in a web GUI alleviating both obstacles. We present Trash AI, an open source service for making computer vision available to anyone with a web browser and images of trash.
+The trash in the environment is a widespread problem that is difficult to measure. Classical measurement techniques require surveyors with pen and paper to manually quantify every piece of trash at a site. This method is time-consuming. Scientists are actively trying to address this issue by using imaging to better understand the prevalence and distribution of trash in an efficient yet effective manner `[@Majchrowska:2022; @Proença:2020; @Moore:2020; @van Lieshout:2020; @WADE AI:2020; @Lynch:2018; @Wuu:2018; @Waterboards:2018]`. An app-based reporting of trash using cell phones, laptops, and other devices has been a valuable solution `[@Lynch:2018]`. Applications for AI in detecting trash currently include: images from `bridges` [@van Lieshout:2020], `drone imaging` [@Moore:2020], cameras on `street sweepers` [@Waterboards:2018], and cell phone app based reporting of `trash` [@Lynch:2018]. Although there are many artificial intelligence algorithms developed for trash classification, none are readily accessible to the average data scientist. The primary limitation is that artificial intelligence (AI) algorithms are primarily run through programming languages (not graphic user interfaces), difficult to deploy without AI expertise, and often live on a server (which costs money to host). New developments in browser-side AI (e.g. tensorflow.js) and serverless architecture (e.g. AWS Lambda) have created the opportunity to have browser-side artificial intelligence in a web GUI alleviating both obstacles. We present Trash AI, an open source service for making computer vision available to anyone with a web browser and images of trash.
# Example
Video
diff --git a/paperbibtest.bib b/paperbibtest.bib
index 3870a12..f6c6669 100644
--- a/paperbibtest.bib
+++ b/paperbibtest.bib
@@ -1,18 +1,4 @@
-@article{van Lieshout:2020,
-author = {van Lieshout, Colin and van Oeveren, Kees and van Emmerik, Tim and Postma, Eric},
-title = {Automated River Plastic Monitoring Using Deep Learning and Cameras},
-journal = {Earth and Space Science},
-volume = {7},
-number = {8},
-pages = {e2019EA000960},
-keywords = {plastic pollution, object detection, automated monitoring, deep learning, artificial intelligence, river plastic},
-doi = {https://doi.org/10.1029/2019EA000960},
-url = {https://agupubs.onlinelibrary.wiley.com/doi/abs/10.1029/2019EA000960},
-eprint = {https://agupubs.onlinelibrary.wiley.com/doi/pdf/10.1029/2019EA000960},
-note = {e2019EA000960 10.1029/2019EA000960},
-abstract = {Abstract Quantifying plastic pollution on surface water is essential to understand and mitigate the impact of plastic pollution to the environment. Current monitoring methods such as visual counting are labor intensive. This limits the feasibility of scaling to long-term monitoring at multiple locations. We present an automated method for monitoring plastic pollution that overcomes this limitation. Floating macroplastics are detected from images of the water surface using deep learning. We perform an experimental evaluation of our method using images from bridge-mounted cameras at five different river locations across Jakarta, Indonesia. The four main results of the experimental evaluation are as follows. First, we realize a method that obtains a reliable estimate of plastic density (68.7\% precision). Our monitoring method successfully distinguishes plastics from environmental elements, such as water surface reflection and organic waste. Second, when trained on one location, the method generalizes well to new locations with relatively similar conditions without retraining (≈50\% average precision). Third, generalization to new locations with considerably different conditions can be boosted by retraining on only 50 objects of the new location (improving precision from ≈20\% to ≈42\%). Fourth, our method matches visual counting methods and detects ≈35\% more plastics, even more so during periods of plastic transport rates of above 10 items per meter per minute. Taken together, these results demonstrate that our method is a promising way of monitoring plastic pollution. By extending the variety of the data set the monitoring method can be readily applied at a larger scale.},
-year = {2020}
-}
+
@misc{WADE AI:2020,
author = {K. Kerge, W. Cowger, K. Haamer, K. Ehala, K. Kivistik, T. Tammiste, M. Vares},
@@ -33,42 +19,6 @@ @misc{Wuu:2018
}
-
-
-
-
-@ARTICLE{Lynch:2018,
- title = "{OpenLitterMap.com} -- Open Data on Plastic Pollution with
- Blockchain Rewards (Littercoin)",
- author = "Lynch, Se{\'a}n",
- abstract = "OpenLitterMap rewards users with Littercoin for producing open
- data on litter. Open data on the geospatial characteristics of
- litter provide means of invoking and evaluating responses to
- plastic pollution. OpenLitterMap currently works as a web app on
- all devices with native mobile apps in development. The stack
- includes the integration of the Laravel PHP Framework on the
- backend; Vue for frontend reactivity; NativeScript-Vue for mobile
- apps; Bulma for CSS; Leaflet for web-mapping; Turf.js for
- geospatial analysis; the Ethereum Blockchain for tokenization;
- Stripe; ChartJS; AWS; and more. Anywhere from a single cigarette
- butt to the contents of an entire beach or street clean can be
- logged in a single geotagged photo. Alternatively, a simple index
- may be used if litter is incalculable. The open data includes an
- increasing 100+ pre-defined types of litter; 20+ corporate
- brands; verification status; coordinates; timestamp; phone model;
- the latest OpenStreetMap address at each location; and the litter
- presence as a Boolean. To date, 100\% of all submitted data (~
- 8200 photos, ~ 28,000 litter from over 150 contributors) has been
- manually verified which is being used to develop machine learning
- algorithms.",
- journal = "Open Geospatial Data, Software and Standards",
- volume = 3,
- number = 1,
- pages = "6",
- month = jun,
- year = 2018
-}
-
@article{Majchrowska:2022,
title = {Deep learning-based waste detection in natural and urban environments},
journal = {Waste Management},
From 80dc434fb0ffcd6737a721854aee61e74d90a0b4 Mon Sep 17 00:00:00 2001
From: wincowgerDEV
Date: Mon, 5 Sep 2022 15:45:11 -0700
Subject: [PATCH 21/73] try without space
---
paper.bib | 2 +-
paper.md | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/paper.bib b/paper.bib
index 15dac2b..ad582e8 100644
--- a/paper.bib
+++ b/paper.bib
@@ -100,7 +100,7 @@ @misc{Waterboards:2018
url = {https://github.com/CAWaterBoardDataCenter/Trash-Tracker}
}
-@article{van Lieshout:2020,
+@article{vanLieshout:2020,
author = {van Lieshout, Colin and van Oeveren, Kees and van Emmerik, Tim and Postma, Eric},
title = {Automated River Plastic Monitoring Using Deep Learning and Cameras},
journal = {Earth and Space Science},
diff --git a/paper.md b/paper.md
index 8a114e0..149a6a4 100644
--- a/paper.md
+++ b/paper.md
@@ -46,7 +46,7 @@ Although computer vision classification routines have been created for trash, th
# Statement of need
-The trash in the environment is a widespread problem that is difficult to measure. Classical measurement techniques require surveyors with pen and paper to manually quantify every piece of trash at a site. This method is time-consuming. Scientists are actively trying to address this issue by using imaging to better understand the prevalence and distribution of trash in an efficient yet effective manner `[@Majchrowska:2022; @Proença:2020; @Moore:2020; @van Lieshout:2020; @WADE AI:2020; @Lynch:2018; @Wuu:2018; @Waterboards:2018]`. An app-based reporting of trash using cell phones, laptops, and other devices has been a valuable solution `[@Lynch:2018]`. Applications for AI in detecting trash currently include: images from `bridges` [@van Lieshout:2020], `drone imaging` [@Moore:2020], cameras on `street sweepers` [@Waterboards:2018], and cell phone app based reporting of `trash` [@Lynch:2018]. Although there are many artificial intelligence algorithms developed for trash classification, none are readily accessible to the average data scientist. The primary limitation is that artificial intelligence (AI) algorithms are primarily run through programming languages (not graphic user interfaces), difficult to deploy without AI expertise, and often live on a server (which costs money to host). New developments in browser-side AI (e.g. tensorflow.js) and serverless architecture (e.g. AWS Lambda) have created the opportunity to have browser-side artificial intelligence in a web GUI alleviating both obstacles. We present Trash AI, an open source service for making computer vision available to anyone with a web browser and images of trash.
+The trash in the environment is a widespread problem that is difficult to measure. Classical measurement techniques require surveyors with pen and paper to manually quantify every piece of trash at a site. This method is time-consuming. Scientists are actively trying to address this issue by using imaging to better understand the prevalence and distribution of trash in an efficient yet effective manner `[@Majchrowska:2022; @Proença:2020; @Moore:2020; @van Lieshout:2020; @WADE AI:2020; @Lynch:2018; @Wuu:2018; @Waterboards:2018]`. An app-based reporting of trash using cell phones, laptops, and other devices has been a valuable solution `[@Lynch:2018]`. Applications for AI in detecting trash currently include: images from `bridges` [@vanLieshout:2020], `drone imaging` [@Moore:2020], cameras on `street sweepers` [@Waterboards:2018], and cell phone app based reporting of `trash` [@Lynch:2018]. Although there are many artificial intelligence algorithms developed for trash classification, none are readily accessible to the average data scientist. The primary limitation is that artificial intelligence (AI) algorithms are primarily run through programming languages (not graphic user interfaces), difficult to deploy without AI expertise, and often live on a server (which costs money to host). New developments in browser-side AI (e.g. tensorflow.js) and serverless architecture (e.g. AWS Lambda) have created the opportunity to have browser-side artificial intelligence in a web GUI alleviating both obstacles. We present Trash AI, an open source service for making computer vision available to anyone with a web browser and images of trash.
# Example
Video
From acd67ec0e6175ea58ba4f10e9267c50387564122 Mon Sep 17 00:00:00 2001
From: wincowgerDEV
Date: Mon, 5 Sep 2022 15:48:05 -0700
Subject: [PATCH 22/73] add all
---
paper.bib | 44 ++++++++++++++++++++++++++++++++++++++++++++
paper.md | 2 +-
paperbibtest.bib | 47 -----------------------------------------------
3 files changed, 45 insertions(+), 48 deletions(-)
delete mode 100644 paperbibtest.bib
diff --git a/paper.bib b/paper.bib
index ad582e8..68c92ee 100644
--- a/paper.bib
+++ b/paper.bib
@@ -150,3 +150,47 @@ @ARTICLE{Lynch:2018
+@misc{WADE AI:2020,
+ author = {K. Kerge, W. Cowger, K. Haamer, K. Ehala, K. Kivistik, T. Tammiste, M. Vares},
+ title = {WADE AI Trash Detection},
+ year = {2020},
+ publisher = {GitHub},
+ journal = {GitHub repository},
+ url = {https://github.com/letsdoitworld/wade-ai}
+}
+
+@misc{Wuu:2018,
+ author = {S. Wuu},
+ title = {Litter Detection Tensorflow},
+ year = {2018},
+ publisher = {GitHub},
+ journal = {GitHub repository},
+ url = {https://github.com/isaychris/litter-detection-tensorflow}
+}
+
+
+@article{Majchrowska:2022,
+title = {Deep learning-based waste detection in natural and urban environments},
+journal = {Waste Management},
+volume = {138},
+pages = {274-284},
+year = {2022},
+issn = {0956-053X},
+doi = {https://doi.org/10.1016/j.wasman.2021.12.001},
+url = {https://www.sciencedirect.com/science/article/pii/S0956053X21006474},
+author = {Sylwia Majchrowska and Agnieszka Mikołajczyk and Maria Ferlin and Zuzanna Klawikowska and Marta A. Plantykow and Arkadiusz Kwasigroch and Karol Majek},
+keywords = {Object detection, Semi-supervised learning, Waste classification benchmarks, Waste detection benchmarks, Waste localization, Waste recognition},
+abstract = {Waste pollution is one of the most significant environmental issues in the modern world. The importance of recycling is well known, both for economic and ecological reasons, and the industry demands high efficiency. Current studies towards automatic waste detection are hardly comparable due to the lack of benchmarks and widely accepted standards regarding the used metrics and data. Those problems are addressed in this article by providing a critical analysis of over ten existing waste datasets and a brief but constructive review of the existing Deep Learning-based waste detection approaches. This article collects and summarizes previous studies and provides the results of authors’ experiments on the presented datasets, all intended to create a first replicable baseline for litter detection. Moreover, new benchmark datasets detect-waste and classify-waste are proposed that are merged collections from the above-mentioned open-source datasets with unified annotations covering all possible waste categories: bio, glass, metal and plastic, non-recyclable, other, paper, and unknown. Finally, a two-stage detector for litter localization and classification is presented. EfficientDet-D2 is used to localize litter, and EfficientNet-B2 to classify the detected waste into seven categories. The classifier is trained in a semi-supervised fashion making the use of unlabeled images. The proposed approach achieves up to 70% of average precision in waste detection and around 75% of classification accuracy on the test dataset. The code and annotations used in the studies are publicly available online11https://github.com/wimlds-trojmiasto/detect-waste..}
+}
+
+@misc{Proença:2020,
+ doi = {10.48550/ARXIV.2003.06975},
+ url = {https://arxiv.org/abs/2003.06975},
+ author = {Proença, Pedro F and Simões, Pedro},
+ keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences},
+ title = {TACO: Trash Annotations in Context for Litter Detection},
+ publisher = {arXiv},
+ year = {2020},
+ copyright = {arXiv.org perpetual, non-exclusive license}
+}
+
diff --git a/paper.md b/paper.md
index 149a6a4..92618ba 100644
--- a/paper.md
+++ b/paper.md
@@ -46,7 +46,7 @@ Although computer vision classification routines have been created for trash, th
# Statement of need
-The trash in the environment is a widespread problem that is difficult to measure. Classical measurement techniques require surveyors with pen and paper to manually quantify every piece of trash at a site. This method is time-consuming. Scientists are actively trying to address this issue by using imaging to better understand the prevalence and distribution of trash in an efficient yet effective manner `[@Majchrowska:2022; @Proença:2020; @Moore:2020; @van Lieshout:2020; @WADE AI:2020; @Lynch:2018; @Wuu:2018; @Waterboards:2018]`. An app-based reporting of trash using cell phones, laptops, and other devices has been a valuable solution `[@Lynch:2018]`. Applications for AI in detecting trash currently include: images from `bridges` [@vanLieshout:2020], `drone imaging` [@Moore:2020], cameras on `street sweepers` [@Waterboards:2018], and cell phone app based reporting of `trash` [@Lynch:2018]. Although there are many artificial intelligence algorithms developed for trash classification, none are readily accessible to the average data scientist. The primary limitation is that artificial intelligence (AI) algorithms are primarily run through programming languages (not graphic user interfaces), difficult to deploy without AI expertise, and often live on a server (which costs money to host). New developments in browser-side AI (e.g. tensorflow.js) and serverless architecture (e.g. AWS Lambda) have created the opportunity to have browser-side artificial intelligence in a web GUI alleviating both obstacles. We present Trash AI, an open source service for making computer vision available to anyone with a web browser and images of trash.
+The trash in the environment is a widespread problem that is difficult to measure. Classical measurement techniques require surveyors with pen and paper to manually quantify every piece of trash at a site. This method is time-consuming. Scientists are actively trying to address this issue by using imaging to better understand the prevalence and distribution of trash in an `efficient yet effective manner` [@Majchrowska:2022; @Proença:2020; @Moore:2020; @vanLieshout:2020; @WADE AI:2020; @Lynch:2018; @Wuu:2018; @Waterboards:2018]. An app-based reporting of trash using cell phones, laptops, and other devices has been a `valuable solution` [@Lynch:2018]. Applications for AI in detecting trash currently include: images from `bridges` [@vanLieshout:2020], `drone imaging` [@Moore:2020], cameras on `street sweepers` [@Waterboards:2018], and cell phone app based reporting of `trash` [@Lynch:2018]. Although there are many artificial intelligence algorithms developed for trash classification, none are readily accessible to the average data scientist. The primary limitation is that artificial intelligence (AI) algorithms are primarily run through programming languages (not graphic user interfaces), difficult to deploy without AI expertise, and often live on a server (which costs money to host). New developments in browser-side AI (e.g. tensorflow.js) and serverless architecture (e.g. AWS Lambda) have created the opportunity to have browser-side artificial intelligence in a web GUI alleviating both obstacles. We present Trash AI, an open source service for making computer vision available to anyone with a web browser and images of trash.
# Example
Video
diff --git a/paperbibtest.bib b/paperbibtest.bib
deleted file mode 100644
index f6c6669..0000000
--- a/paperbibtest.bib
+++ /dev/null
@@ -1,47 +0,0 @@
-
-
-@misc{WADE AI:2020,
- author = {K. Kerge, W. Cowger, K. Haamer, K. Ehala, K. Kivistik, T. Tammiste, M. Vares},
- title = {WADE AI Trash Detection},
- year = {2020},
- publisher = {GitHub},
- journal = {GitHub repository},
- url = {https://github.com/letsdoitworld/wade-ai}
-}
-
-@misc{Wuu:2018,
- author = {S. Wuu},
- title = {Litter Detection Tensorflow},
- year = {2018},
- publisher = {GitHub},
- journal = {GitHub repository},
- url = {https://github.com/isaychris/litter-detection-tensorflow}
-}
-
-
-@article{Majchrowska:2022,
-title = {Deep learning-based waste detection in natural and urban environments},
-journal = {Waste Management},
-volume = {138},
-pages = {274-284},
-year = {2022},
-issn = {0956-053X},
-doi = {https://doi.org/10.1016/j.wasman.2021.12.001},
-url = {https://www.sciencedirect.com/science/article/pii/S0956053X21006474},
-author = {Sylwia Majchrowska and Agnieszka Mikołajczyk and Maria Ferlin and Zuzanna Klawikowska and Marta A. Plantykow and Arkadiusz Kwasigroch and Karol Majek},
-keywords = {Object detection, Semi-supervised learning, Waste classification benchmarks, Waste detection benchmarks, Waste localization, Waste recognition},
-abstract = {Waste pollution is one of the most significant environmental issues in the modern world. The importance of recycling is well known, both for economic and ecological reasons, and the industry demands high efficiency. Current studies towards automatic waste detection are hardly comparable due to the lack of benchmarks and widely accepted standards regarding the used metrics and data. Those problems are addressed in this article by providing a critical analysis of over ten existing waste datasets and a brief but constructive review of the existing Deep Learning-based waste detection approaches. This article collects and summarizes previous studies and provides the results of authors’ experiments on the presented datasets, all intended to create a first replicable baseline for litter detection. Moreover, new benchmark datasets detect-waste and classify-waste are proposed that are merged collections from the above-mentioned open-source datasets with unified annotations covering all possible waste categories: bio, glass, metal and plastic, non-recyclable, other, paper, and unknown. Finally, a two-stage detector for litter localization and classification is presented. EfficientDet-D2 is used to localize litter, and EfficientNet-B2 to classify the detected waste into seven categories. The classifier is trained in a semi-supervised fashion making the use of unlabeled images. The proposed approach achieves up to 70% of average precision in waste detection and around 75% of classification accuracy on the test dataset. The code and annotations used in the studies are publicly available online11https://github.com/wimlds-trojmiasto/detect-waste..}
-}
-
-@misc{Proença:2020,
- doi = {10.48550/ARXIV.2003.06975},
- url = {https://arxiv.org/abs/2003.06975},
- author = {Proença, Pedro F and Simões, Pedro},
- keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences},
- title = {TACO: Trash Annotations in Context for Litter Detection},
- publisher = {arXiv},
- year = {2020},
- copyright = {arXiv.org perpetual, non-exclusive license}
-}
-
-
From ce4de0959f174fa53b5352c8a22f2484d7b50fda Mon Sep 17 00:00:00 2001
From: wincowgerDEV
Date: Mon, 5 Sep 2022 15:50:38 -0700
Subject: [PATCH 23/73] no spaces allowed in citation names
---
paper.bib | 2 +-
paper.md | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/paper.bib b/paper.bib
index 68c92ee..562d126 100644
--- a/paper.bib
+++ b/paper.bib
@@ -150,7 +150,7 @@ @ARTICLE{Lynch:2018
-@misc{WADE AI:2020,
+@misc{WADEAI:2020,
author = {K. Kerge, W. Cowger, K. Haamer, K. Ehala, K. Kivistik, T. Tammiste, M. Vares},
title = {WADE AI Trash Detection},
year = {2020},
diff --git a/paper.md b/paper.md
index 92618ba..8fd0b43 100644
--- a/paper.md
+++ b/paper.md
@@ -46,7 +46,7 @@ Although computer vision classification routines have been created for trash, th
# Statement of need
-The trash in the environment is a widespread problem that is difficult to measure. Classical measurement techniques require surveyors with pen and paper to manually quantify every piece of trash at a site. This method is time-consuming. Scientists are actively trying to address this issue by using imaging to better understand the prevalence and distribution of trash in an `efficient yet effective manner` [@Majchrowska:2022; @Proença:2020; @Moore:2020; @vanLieshout:2020; @WADE AI:2020; @Lynch:2018; @Wuu:2018; @Waterboards:2018]. An app-based reporting of trash using cell phones, laptops, and other devices has been a `valuable solution` [@Lynch:2018]. Applications for AI in detecting trash currently include: images from `bridges` [@vanLieshout:2020], `drone imaging` [@Moore:2020], cameras on `street sweepers` [@Waterboards:2018], and cell phone app based reporting of `trash` [@Lynch:2018]. Although there are many artificial intelligence algorithms developed for trash classification, none are readily accessible to the average data scientist. The primary limitation is that artificial intelligence (AI) algorithms are primarily run through programming languages (not graphic user interfaces), difficult to deploy without AI expertise, and often live on a server (which costs money to host). New developments in browser-side AI (e.g. tensorflow.js) and serverless architecture (e.g. AWS Lambda) have created the opportunity to have browser-side artificial intelligence in a web GUI alleviating both obstacles. We present Trash AI, an open source service for making computer vision available to anyone with a web browser and images of trash.
+The trash in the environment is a widespread problem that is difficult to measure. Classical measurement techniques require surveyors with pen and paper to manually quantify every piece of trash at a site. This method is time-consuming. Scientists are actively trying to address this issue by using imaging to better understand the prevalence and distribution of trash in an `efficient yet effective manner` [@Majchrowska:2022; @Proença:2020; @Moore:2020; @vanLieshout:2020; @WADEAI:2020; @Lynch:2018; @Wuu:2018; @Waterboards:2018]. An app-based reporting of trash using cell phones, laptops, and other devices has been a `valuable solution` [@Lynch:2018]. Applications for AI in detecting trash currently include: images from `bridges` [@vanLieshout:2020], `drone imaging` [@Moore:2020], cameras on `street sweepers` [@Waterboards:2018], and cell phone app based reporting of `trash` [@Lynch:2018]. Although there are many artificial intelligence algorithms developed for trash classification, none are readily accessible to the average data scientist. The primary limitation is that artificial intelligence (AI) algorithms are primarily run through programming languages (not graphic user interfaces), difficult to deploy without AI expertise, and often live on a server (which costs money to host). New developments in browser-side AI (e.g. tensorflow.js) and serverless architecture (e.g. AWS Lambda) have created the opportunity to have browser-side artificial intelligence in a web GUI alleviating both obstacles. We present Trash AI, an open source service for making computer vision available to anyone with a web browser and images of trash.
# Example
Video
From 41dd5a14028ebfdade1ea15186f66d59574dcd8b Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Mon, 5 Sep 2022 16:42:30 -0700
Subject: [PATCH 24/73] add demo images
---
paper.md | 28 ++++++++++++++++++++--------
1 file changed, 20 insertions(+), 8 deletions(-)
diff --git a/paper.md b/paper.md
index 8fd0b43..c2c3735 100644
--- a/paper.md
+++ b/paper.md
@@ -48,14 +48,26 @@ Although computer vision classification routines have been created for trash, th
The trash in the environment is a widespread problem that is difficult to measure. Classical measurement techniques require surveyors with pen and paper to manually quantify every piece of trash at a site. This method is time-consuming. Scientists are actively trying to address this issue by using imaging to better understand the prevalence and distribution of trash in an `efficient yet effective manner` [@Majchrowska:2022; @Proença:2020; @Moore:2020; @vanLieshout:2020; @WADEAI:2020; @Lynch:2018; @Wuu:2018; @Waterboards:2018]. An app-based reporting of trash using cell phones, laptops, and other devices has been a `valuable solution` [@Lynch:2018]. Applications for AI in detecting trash currently include: images from `bridges` [@vanLieshout:2020], `drone imaging` [@Moore:2020], cameras on `street sweepers` [@Waterboards:2018], and cell phone app based reporting of `trash` [@Lynch:2018]. Although there are many artificial intelligence algorithms developed for trash classification, none are readily accessible to the average data scientist. The primary limitation is that artificial intelligence (AI) algorithms are primarily run through programming languages (not graphic user interfaces), difficult to deploy without AI expertise, and often live on a server (which costs money to host). New developments in browser-side AI (e.g. tensorflow.js) and serverless architecture (e.g. AWS Lambda) have created the opportunity to have browser-side artificial intelligence in a web GUI alleviating both obstacles. We present Trash AI, an open source service for making computer vision available to anyone with a web browser and images of trash.
-# Example
- Video
- Figures can be included like this:
-![Caption for example figure.\label{fig:example}](figure.png)
-and referenced from text using \autoref{fig:example}.
+# Demo
+We have a full video tutorial on [Youtube](https://youtu.be/HHrjUpQynUM)
-Figure sizes can be customized by adding an optional second parameter:
-![Caption for example figure.](figure.png){ width=20% }
+## Basic workflow:
+### 1. Upload images by dragging onto the screen.
+![Screenshot 2022-09-05 155313](https://user-images.githubusercontent.com/26821843/188520590-86d7b0b3-1b40-4ce5-8fb0-1be54b2de20e.png)
+### 2. View results while images are processing.
+![Screenshot 2022-09-05 155435](https://user-images.githubusercontent.com/26821843/188520700-43f4c964-c430-4a78-843b-68ae7aae2ba2.png)
+### 3. View summary results of detected trash.
+![Screenshot 2022-09-05 155606](https://user-images.githubusercontent.com/26821843/188520723-92b50200-d568-4953-aa26-fbcbbd965a38.png)
+### 4. View results mapped if your images have location stamp.
+![Screenshot 2022-09-05 155702](https://user-images.githubusercontent.com/26821843/188520745-65ef3270-6093-488a-b501-305ecb436bc1.png)
+### 5. Click download all to extract a zip folder with labeled images and metadata.
+![Screenshot 2022-09-05 163626](https://user-images.githubusercontent.com/26821843/188520813-f9169ba9-14d9-4f11-bf53-a6fd8e379cdf.png)
+### 6. View labeled images from downloaded results.
+![Screenshot 2022-09-05 160109](https://user-images.githubusercontent.com/26821843/188520833-d313279d-b2d0-4d37-ac0b-670ce3252540.png)
+### 7. View metadata for each image using image_hash.json (using https://jsoneditoronline.org/)
+![Screenshot 2022-09-05 162658](https://user-images.githubusercontent.com/26821843/188520860-629c529d-dc5e-4e93-9beb-b65e4560bc13.png)
+### 8. View metadata for all images in "summary.json" (using https://jsoneditoronline.org/)
+![Screenshot 2022-09-05 162853](https://user-images.githubusercontent.com/26821843/188520906-3061ecce-cb0e-4c76-9b81-303731110380.png)
# Method
@@ -77,4 +89,4 @@ This workflow is likely to be highly useful for a wide variety of computer visio
# Acknowledgements
Code for Sacramento led the development of the software tool. The Moore Institute advised on priorities and led the drafting of this manuscript. Let's Do It Foundation assisted with original products leading up to trash AI in the development of WADE AI. We acknowledge the work of the Code for Sacramento team, part of code for America, without whom this project would not have been possible and acknowledge the input of the California Water Monitoring Council Trash Monitoring Workgroup. We acknowledge financial support from McPike Zima Charitable Foundation.
-# References
\ No newline at end of file
+# References
From cbae5afa2018378197b93fd2d2e118c8fb718436 Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Mon, 5 Sep 2022 16:43:08 -0700
Subject: [PATCH 25/73] Update paper.md
---
paper.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/paper.md b/paper.md
index c2c3735..13ccacf 100644
--- a/paper.md
+++ b/paper.md
@@ -64,7 +64,7 @@ We have a full video tutorial on [Youtube](https://youtu.be/HHrjUpQynUM)
![Screenshot 2022-09-05 163626](https://user-images.githubusercontent.com/26821843/188520813-f9169ba9-14d9-4f11-bf53-a6fd8e379cdf.png)
### 6. View labeled images from downloaded results.
![Screenshot 2022-09-05 160109](https://user-images.githubusercontent.com/26821843/188520833-d313279d-b2d0-4d37-ac0b-670ce3252540.png)
-### 7. View metadata for each image using image_hash.json (using https://jsoneditoronline.org/)
+### 7. View metadata for each image using "image_hash.json" (using https://jsoneditoronline.org/)
![Screenshot 2022-09-05 162658](https://user-images.githubusercontent.com/26821843/188520860-629c529d-dc5e-4e93-9beb-b65e4560bc13.png)
### 8. View metadata for all images in "summary.json" (using https://jsoneditoronline.org/)
![Screenshot 2022-09-05 162853](https://user-images.githubusercontent.com/26821843/188520906-3061ecce-cb0e-4c76-9b81-303731110380.png)
From d1075f37429b65ba74f1e28a0a5e7a8e36666232 Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Mon, 5 Sep 2022 16:46:28 -0700
Subject: [PATCH 26/73] give images some space
---
paper.md | 15 +++++++++++++++
1 file changed, 15 insertions(+)
diff --git a/paper.md b/paper.md
index 13ccacf..fa60b46 100644
--- a/paper.md
+++ b/paper.md
@@ -53,20 +53,35 @@ We have a full video tutorial on [Youtube](https://youtu.be/HHrjUpQynUM)
## Basic workflow:
### 1. Upload images by dragging onto the screen.
+
![Screenshot 2022-09-05 155313](https://user-images.githubusercontent.com/26821843/188520590-86d7b0b3-1b40-4ce5-8fb0-1be54b2de20e.png)
+
### 2. View results while images are processing.
+
![Screenshot 2022-09-05 155435](https://user-images.githubusercontent.com/26821843/188520700-43f4c964-c430-4a78-843b-68ae7aae2ba2.png)
+
### 3. View summary results of detected trash.
+
![Screenshot 2022-09-05 155606](https://user-images.githubusercontent.com/26821843/188520723-92b50200-d568-4953-aa26-fbcbbd965a38.png)
+
### 4. View results mapped if your images have location stamp.
+
![Screenshot 2022-09-05 155702](https://user-images.githubusercontent.com/26821843/188520745-65ef3270-6093-488a-b501-305ecb436bc1.png)
+
### 5. Click download all to extract a zip folder with labeled images and metadata.
+
![Screenshot 2022-09-05 163626](https://user-images.githubusercontent.com/26821843/188520813-f9169ba9-14d9-4f11-bf53-a6fd8e379cdf.png)
+
### 6. View labeled images from downloaded results.
+
![Screenshot 2022-09-05 160109](https://user-images.githubusercontent.com/26821843/188520833-d313279d-b2d0-4d37-ac0b-670ce3252540.png)
+
### 7. View metadata for each image using "image_hash.json" (using https://jsoneditoronline.org/)
+
![Screenshot 2022-09-05 162658](https://user-images.githubusercontent.com/26821843/188520860-629c529d-dc5e-4e93-9beb-b65e4560bc13.png)
+
### 8. View metadata for all images in "summary.json" (using https://jsoneditoronline.org/)
+
![Screenshot 2022-09-05 162853](https://user-images.githubusercontent.com/26821843/188520906-3061ecce-cb0e-4c76-9b81-303731110380.png)
# Method
From 11b2c2ece28211685fb3df922e335c946f442093 Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Mon, 5 Sep 2022 16:54:28 -0700
Subject: [PATCH 27/73] image captions
---
paper.md | 32 ++++++++++++++++----------------
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/paper.md b/paper.md
index fa60b46..d428c1e 100644
--- a/paper.md
+++ b/paper.md
@@ -52,37 +52,37 @@ The trash in the environment is a widespread problem that is difficult to measur
We have a full video tutorial on [Youtube](https://youtu.be/HHrjUpQynUM)
## Basic workflow:
-### 1. Upload images by dragging onto the screen.
+### 1.
-![Screenshot 2022-09-05 155313](https://user-images.githubusercontent.com/26821843/188520590-86d7b0b3-1b40-4ce5-8fb0-1be54b2de20e.png)
+![Upload images by dragging onto the screen.\label{fig:example1}](https://user-images.githubusercontent.com/26821843/188520590-86d7b0b3-1b40-4ce5-8fb0-1be54b2de20e.png)
-### 2. View results while images are processing.
+### 2.
-![Screenshot 2022-09-05 155435](https://user-images.githubusercontent.com/26821843/188520700-43f4c964-c430-4a78-843b-68ae7aae2ba2.png)
+![View results while images are processing.\label{fig:example2}](https://user-images.githubusercontent.com/26821843/188520700-43f4c964-c430-4a78-843b-68ae7aae2ba2.png)
-### 3. View summary results of detected trash.
+### 3.
-![Screenshot 2022-09-05 155606](https://user-images.githubusercontent.com/26821843/188520723-92b50200-d568-4953-aa26-fbcbbd965a38.png)
+![View summary results of detected trash.\label{fig:example3}](https://user-images.githubusercontent.com/26821843/188520723-92b50200-d568-4953-aa26-fbcbbd965a38.png)
-### 4. View results mapped if your images have location stamp.
+### 4.
-![Screenshot 2022-09-05 155702](https://user-images.githubusercontent.com/26821843/188520745-65ef3270-6093-488a-b501-305ecb436bc1.png)
+![View results mapped if your images have location stamp.\label{fig:example4}](https://user-images.githubusercontent.com/26821843/188520745-65ef3270-6093-488a-b501-305ecb436bc1.png)
-### 5. Click download all to extract a zip folder with labeled images and metadata.
+### 5.
-![Screenshot 2022-09-05 163626](https://user-images.githubusercontent.com/26821843/188520813-f9169ba9-14d9-4f11-bf53-a6fd8e379cdf.png)
+![Click download all to extract a zip folder with labeled images and metadata.\label{fig:example5}](https://user-images.githubusercontent.com/26821843/188520813-f9169ba9-14d9-4f11-bf53-a6fd8e379cdf.png)
-### 6. View labeled images from downloaded results.
+### 6.
-![Screenshot 2022-09-05 160109](https://user-images.githubusercontent.com/26821843/188520833-d313279d-b2d0-4d37-ac0b-670ce3252540.png)
+![View labeled images from downloaded results.\label{fig:example6}](https://user-images.githubusercontent.com/26821843/188520833-d313279d-b2d0-4d37-ac0b-670ce3252540.png)
-### 7. View metadata for each image using "image_hash.json" (using https://jsoneditoronline.org/)
+### 7.
-![Screenshot 2022-09-05 162658](https://user-images.githubusercontent.com/26821843/188520860-629c529d-dc5e-4e93-9beb-b65e4560bc13.png)
+![View metadata for each image using "image_hash.json" (using https://jsoneditoronline.org/).\label{fig:example7}](https://user-images.githubusercontent.com/26821843/188520860-629c529d-dc5e-4e93-9beb-b65e4560bc13.png)
-### 8. View metadata for all images in "summary.json" (using https://jsoneditoronline.org/)
+### 8.
-![Screenshot 2022-09-05 162853](https://user-images.githubusercontent.com/26821843/188520906-3061ecce-cb0e-4c76-9b81-303731110380.png)
+![View metadata for all images in "summary.json" (using https://jsoneditoronline.org/).\label{fig:example8}](https://user-images.githubusercontent.com/26821843/188520906-3061ecce-cb0e-4c76-9b81-303731110380.png)
# Method
From 4b2a454127607e49493c55c5026997478950cd1b Mon Sep 17 00:00:00 2001
From: wincowgerDEV
Date: Mon, 12 Sep 2022 12:05:46 -0700
Subject: [PATCH 28/73] add mention of wade
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index 015c746..889fd7e 100644
--- a/README.md
+++ b/README.md
@@ -9,7 +9,7 @@
### Project Summary
-Trash AI is a web application where users can upload photos of litter which will be labeled using computer vision to detect and categorize litter in the image by type. Trash AI will enhance the abilities of researchers to quickly label trash in photos.
+Trash AI is a web application where users can upload photos of litter which will be labeled using computer vision to detect and categorize litter in the image by type. Early inspiration from [WADE AI](https://github.com/letsdoitworld/wade-ai) streamlined this development. Trash AI will enhance the abilities of researchers to quickly label trash in photos.
#### Demo
[![image](https://user-images.githubusercontent.com/26821843/188515526-33e1196b-6830-4187-8fe4-e68b2bd4019e.png)](https://youtu.be/HHrjUpQynUM)
From 4ff318f9c429d80adf701fee7131f2329e93818d Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Thu, 15 Sep 2022 16:55:42 -0700
Subject: [PATCH 29/73] update authors and acknowledgements
---
paper.md | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/paper.md b/paper.md
index d428c1e..07dd6ec 100644
--- a/paper.md
+++ b/paper.md
@@ -26,10 +26,14 @@ authors:
affiliation: 4
- name: Kris Haamer
affiliation: 4
+ - name: Gina
+ affiliation: 2
+ - name: Brianda
+ affiliation: 2
affiliations:
- name: Moore Institute for Plastic Pollution Research, USA
index: 1
- - name: Code for Sacramento, USA
+ - name: Code for Sacramento and Fresno, USA
index: 2
- name: California Department of Transportation, USA
index: 3
@@ -102,6 +106,6 @@ Trash AI is hosted on the web at www.trashai.org. The source code is [available
This workflow is likely to be highly useful for a wide variety of computer vision applications and we hope that people reuse the code for applications beyond trash detection. We aim to increase the labeling of images by creating a user interface that allows users to improve the annotations that the model is currently predicting by manually restructuring the bounding boxes and relabeling the classes. We aim to work in collaboration with the TACO development team to improve our workflow integration to get the data that people share to our S3 bucket into the [TACO training dataset](http://tacodataset.org/) and trained model. Future models will expand the annotations to include the `Trash Taxonomy` [@Hapich:2022] classes and add an option to choose between other models besides the current model.
# Acknowledgements
-Code for Sacramento led the development of the software tool. The Moore Institute advised on priorities and led the drafting of this manuscript. Let's Do It Foundation assisted with original products leading up to trash AI in the development of WADE AI. We acknowledge the work of the Code for Sacramento team, part of code for America, without whom this project would not have been possible and acknowledge the input of the California Water Monitoring Council Trash Monitoring Workgroup. We acknowledge financial support from McPike Zima Charitable Foundation.
+Code for Sacramento led the development of the software tool. The Moore Institute advised on priorities and led the drafting of this manuscript. Let's Do It Foundation assisted with original products leading up to trash AI in the development of WADE AI. We acknowledge the work of the Code for Sacramento and Fresno team, part of code for America, without whom this project would not have been possible and acknowledge the input of the California Water Monitoring Council Trash Monitoring Workgroup. In particular we would like to acknowledge Joseph Falkner, Democracy Lab, Brad Anderson, Jim Ewald, Don Brower, Democracy Lab, University of Houston. We acknowledge financial support from McPike Zima Charitable Foundation.
# References
From c6b89e9be7a9e8151464f1673d9f7ef8aa488fff Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Thu, 22 Sep 2022 19:44:46 -0700
Subject: [PATCH 30/73] Updated with Dan's Recs
---
paper.md | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/paper.md b/paper.md
index 07dd6ec..a4c01a2 100644
--- a/paper.md
+++ b/paper.md
@@ -26,14 +26,14 @@ authors:
affiliation: 4
- name: Kris Haamer
affiliation: 4
- - name: Gina
+ - name: Gina Durante
affiliation: 2
- - name: Brianda
+ - name: Brianda Hernandez
affiliation: 2
affiliations:
- name: Moore Institute for Plastic Pollution Research, USA
index: 1
- - name: Code for Sacramento and Fresno, USA
+ - name: Code for Sacramento and Open Fresno, USA
index: 2
- name: California Department of Transportation, USA
index: 3
@@ -50,7 +50,7 @@ Although computer vision classification routines have been created for trash, th
# Statement of need
-The trash in the environment is a widespread problem that is difficult to measure. Classical measurement techniques require surveyors with pen and paper to manually quantify every piece of trash at a site. This method is time-consuming. Scientists are actively trying to address this issue by using imaging to better understand the prevalence and distribution of trash in an `efficient yet effective manner` [@Majchrowska:2022; @Proença:2020; @Moore:2020; @vanLieshout:2020; @WADEAI:2020; @Lynch:2018; @Wuu:2018; @Waterboards:2018]. An app-based reporting of trash using cell phones, laptops, and other devices has been a `valuable solution` [@Lynch:2018]. Applications for AI in detecting trash currently include: images from `bridges` [@vanLieshout:2020], `drone imaging` [@Moore:2020], cameras on `street sweepers` [@Waterboards:2018], and cell phone app based reporting of `trash` [@Lynch:2018]. Although there are many artificial intelligence algorithms developed for trash classification, none are readily accessible to the average data scientist. The primary limitation is that artificial intelligence (AI) algorithms are primarily run through programming languages (not graphic user interfaces), difficult to deploy without AI expertise, and often live on a server (which costs money to host). New developments in browser-side AI (e.g. tensorflow.js) and serverless architecture (e.g. AWS Lambda) have created the opportunity to have browser-side artificial intelligence in a web GUI alleviating both obstacles. We present Trash AI, an open source service for making computer vision available to anyone with a web browser and images of trash.
+The trash in the environment is a widespread problem that is difficult to measure. Classical measurement techniques require surveyors with pen and paper to manually quantify every piece of trash at a site. This method is time-consuming. Scientists are actively trying to address this issue by using imaging to better understand the prevalence and distribution of trash in an `efficient yet effective manner` [@Majchrowska:2022; @Proença:2020; @Moore:2020; @vanLieshout:2020; @WADEAI:2020; @Lynch:2018; @Wuu:2018; @Waterboards:2018]. An app-based reporting of trash using cell phones, laptops, and other devices has been a `valuable solution` [@Lynch:2018]. Applications for AI in detecting trash currently include: images from `bridges` [@vanLieshout:2020], `drone imaging` [@Moore:2020], cameras on `street sweepers` [@Waterboards:2018], and cell phone app based reporting of `trash` [@Lynch:2018]. Although there are many artificial intelligence algorithms developed for trash classification, none are readily accessible to the average litter researcher. The primary limitation is that artificial intelligence (AI) algorithms are primarily run through programming languages (not graphic user interfaces), difficult to deploy without AI expertise, and often live on a server (which costs money to host). New developments in browser-side AI (e.g. tensorflow.js) and serverless architecture (e.g. AWS Lambda) have created the opportunity to have affordable browser-side artificial intelligence in a web GUI alleviating both obstacles. We present Trash AI, an open source service for making computer vision available to anyone with a web browser and images of trash.
# Demo
We have a full video tutorial on [Youtube](https://youtu.be/HHrjUpQynUM)
@@ -91,7 +91,7 @@ We have a full video tutorial on [Youtube](https://youtu.be/HHrjUpQynUM)
# Method
## Workflow Overview
-Trash AI is trained on the [TACO dataset](http://tacodataset.org/) using [YOLO 5](pytorch.org). Trash AI stores images in [IndexDB](https://developer.mozilla.org/en-US/docs/Web/API/IndexedDB_API) to keep the data primarily browser side and uses [tensorflow.js](https://www.tensorflow.org/js) to keep analysis browser side too. When images are uploaded to the browser, Trash AI provides the prediction of the model as a graphical output. The raw data from the model and labeled images can be downloaded in a batch download to expedite analyses. Any data uploaded to the platform is automatically saved to an [S3 bucket](https://aws.amazon.com/s3/) which we can use to improve the model over time.
+Trash AI is trained on the [TACO dataset](http://tacodataset.org/) using [YOLO 5](pytorch.org). Trash AI stores images in [IndexDB](https://developer.mozilla.org/en-US/docs/Web/API/IndexedDB_API) to keep the data primarily browser side and uses [tensorflow.js](https://www.tensorflow.org/js) to keep analysis browser side too. When images are uploaded to the browser, Trash AI provides the prediction of the model as a graphical output. The raw data from the model and labeled images can be downloaded in a batch download to expedite analyses. Any data uploaded to the platform may be automatically saved to an [S3 bucket](https://aws.amazon.com/s3/) which we can use to improve the model over time.
## AI Training
The AI model was developed starting with the TACO dataset which was available with a complimentary Jupyter Notebook on [Kaggle](https://www.kaggle.com/datasets/kneroma/tacotrashdataset). An example notebook was referenced which used the default `YOLO v5 model` [@Jocher:2020] as the basic model to begin transfer learning. Next, transfer learning was completed using the entire TACO dataset to import the image classes and annotations in the YOLO v5 model.
@@ -100,12 +100,12 @@ The AI model was developed starting with the TACO dataset which was available wi
From our experience, the accuracy of the model varies depending on the quality of the images and their context/background. Trash is a nuanced classification because the same object in different settings will not be considered trash (e.g. a drink bottle on someone's desk vs in the forest laying on the ground). This and other complexities to trash classification make a general trash AI a challenging (yet worthwhile) long term endeavor. The algorithm is primarily trained on single pieces of trash in the image with the trash laying on the ground and the model will likely excel for that use case currently.
# Availability
-Trash AI is hosted on the web at www.trashai.org. The source code is [available on github](https://github.com/code4sac/trash-ai) with an [MIT license](https://mit-license.org/). The source code can be run offline on any machine that can install [Docker and Docker-compose](www.docker.com). [Documentation](https://github.com/code4sac/trash-ai#ai-for-litter-detection-web-application) is maintained by Code for Sacramento on Github and will be updated with each release. The image datasets shared to the tool are in an S3 Bucket that needs to be reviewed before being shared with others due to security and moderation concerns but can be acquired by [contacting the repo maintaniers](https://github.com/code4sac/trash-ai/graphs/contributors).
+Trash AI is hosted on the web at www.trashai.org. The source code is [available on github](https://github.com/code4sac/trash-ai) with an [MIT license](https://mit-license.org/). The source code can be run offline on any machine that can install [Docker and Docker-compose](www.docker.com). [Documentation](https://github.com/code4sac/trash-ai#ai-for-litter-detection-web-application) is maintained by Code for Sacramento and Open Fresno on Github and will be updated with each release. [Nonexhaustive instructions for AWS deployment](https://github.com/code4sac/trash-ai/blob/manuscript/docs/git-aws-account-setup.md) is available for anyone attempting production level deployment. The image datasets shared to the tool are in an S3 Bucket that needs to be reviewed before being shared with others due to security and moderation concerns but can be acquired by [contacting the repo maintaniers](https://github.com/code4sac/trash-ai/graphs/contributors).
# Future Goals
This workflow is likely to be highly useful for a wide variety of computer vision applications and we hope that people reuse the code for applications beyond trash detection. We aim to increase the labeling of images by creating a user interface that allows users to improve the annotations that the model is currently predicting by manually restructuring the bounding boxes and relabeling the classes. We aim to work in collaboration with the TACO development team to improve our workflow integration to get the data that people share to our S3 bucket into the [TACO training dataset](http://tacodataset.org/) and trained model. Future models will expand the annotations to include the `Trash Taxonomy` [@Hapich:2022] classes and add an option to choose between other models besides the current model.
# Acknowledgements
-Code for Sacramento led the development of the software tool. The Moore Institute advised on priorities and led the drafting of this manuscript. Let's Do It Foundation assisted with original products leading up to trash AI in the development of WADE AI. We acknowledge the work of the Code for Sacramento and Fresno team, part of code for America, without whom this project would not have been possible and acknowledge the input of the California Water Monitoring Council Trash Monitoring Workgroup. In particular we would like to acknowledge Joseph Falkner, Democracy Lab, Brad Anderson, Jim Ewald, Don Brower, Democracy Lab, University of Houston. We acknowledge financial support from McPike Zima Charitable Foundation.
+Code for Sacramento and Open Fresno led the development of the software tool. The Moore Institute advised on priorities and led the drafting of this manuscript. Let's Do It Foundation assisted with original products leading up to trash AI in the development of WADE AI. We acknowledge the work of the Code for Sacramento and Open Fresno team, brigades of code for America, without whom this project would not have been possible and acknowledge the input of the California Water Monitoring Council Trash Monitoring Workgroup. In particular we would like to acknowledge Joseph Falkner, Democracy Lab, Brad Anderson, Jim Ewald, Don Brower, Democracy Lab, University of Houston. We acknowledge financial support from McPike Zima Charitable Foundation.
# References
From 509003052de689e8e2baada4fefa5ad4d28e7463 Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Wed, 28 Sep 2022 19:19:31 +0200
Subject: [PATCH 31/73] Mary Comments
https://github.com/code4sac/trash-ai/pull/69#issuecomment-1257554556
---
paper.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/paper.md b/paper.md
index a4c01a2..879bd86 100644
--- a/paper.md
+++ b/paper.md
@@ -106,6 +106,6 @@ Trash AI is hosted on the web at www.trashai.org. The source code is [available
This workflow is likely to be highly useful for a wide variety of computer vision applications and we hope that people reuse the code for applications beyond trash detection. We aim to increase the labeling of images by creating a user interface that allows users to improve the annotations that the model is currently predicting by manually restructuring the bounding boxes and relabeling the classes. We aim to work in collaboration with the TACO development team to improve our workflow integration to get the data that people share to our S3 bucket into the [TACO training dataset](http://tacodataset.org/) and trained model. Future models will expand the annotations to include the `Trash Taxonomy` [@Hapich:2022] classes and add an option to choose between other models besides the current model.
# Acknowledgements
-Code for Sacramento and Open Fresno led the development of the software tool. The Moore Institute advised on priorities and led the drafting of this manuscript. Let's Do It Foundation assisted with original products leading up to trash AI in the development of WADE AI. We acknowledge the work of the Code for Sacramento and Open Fresno team, brigades of code for America, without whom this project would not have been possible and acknowledge the input of the California Water Monitoring Council Trash Monitoring Workgroup. In particular we would like to acknowledge Joseph Falkner, Democracy Lab, Brad Anderson, Jim Ewald, Don Brower, Democracy Lab, University of Houston. We acknowledge financial support from McPike Zima Charitable Foundation.
+Code for Sacramento and Open Fresno led the development of the software tool. The Moore Institute advised on priorities and led the drafting of this manuscript. Let's Do It Foundation assisted with original products leading up to trash AI in the development of WADE AI. We acknowledge the work of the Code for Sacramento and Open Fresno team, brigades of code for America, without whom this project would not have been possible and acknowledge the input of the California Water Monitoring Council Trash Monitoring Workgroup. In particular we would like to acknowledge Joseph Falkner, Democracy Lab, Brad Anderson, Jim Ewald, Don Brower, University of Houston. We acknowledge financial support from McPike Zima Charitable Foundation.
# References
From 3652a152ea3d3afb7e6df823f0791b1bb0e081a4 Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Wed, 28 Sep 2022 19:22:01 +0200
Subject: [PATCH 32/73] SteveO comments
https://github.com/code4sac/trash-ai/pull/69#pullrequestreview-1119350215
---
paper.md | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/paper.md b/paper.md
index 879bd86..43ddbad 100644
--- a/paper.md
+++ b/paper.md
@@ -9,6 +9,10 @@ tags:
- AI
- Image Classification
- Serverless
+ - vue
+ - vuetify
+ - vite
+ - pinia
authors:
- name: Win Cowger
orcid: 0000-0001-9226-3104
From 39726ed2705bfc3580c8e8dc7289484a85c21e59 Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Wed, 28 Sep 2022 19:30:50 +0200
Subject: [PATCH 33/73] Walter's comments
https://github.com/code4sac/trash-ai/pull/69#pullrequestreview-1119351559
---
paper.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/paper.md b/paper.md
index 43ddbad..5f0b2a4 100644
--- a/paper.md
+++ b/paper.md
@@ -101,7 +101,7 @@ Trash AI is trained on the [TACO dataset](http://tacodataset.org/) using [YOLO 5
The AI model was developed starting with the TACO dataset which was available with a complimentary Jupyter Notebook on [Kaggle](https://www.kaggle.com/datasets/kneroma/tacotrashdataset). An example notebook was referenced which used the default `YOLO v5 model` [@Jocher:2020] as the basic model to begin transfer learning. Next, transfer learning was completed using the entire TACO dataset to import the image classes and annotations in the YOLO v5 model.
## Limitations
-From our experience, the accuracy of the model varies depending on the quality of the images and their context/background. Trash is a nuanced classification because the same object in different settings will not be considered trash (e.g. a drink bottle on someone's desk vs in the forest laying on the ground). This and other complexities to trash classification make a general trash AI a challenging (yet worthwhile) long term endeavor. The algorithm is primarily trained on single pieces of trash in the image with the trash laying on the ground and the model will likely excel for that use case currently.
+From our experience, the accuracy of the model varies depending on the quality of the images and their context/background. Trash is a nuanced classification because the same object in different settings will not be considered trash (e.g. a drink bottle on someone's desk vs in the forest laying on the ground). This and other complexities to trash classification make a general trash AI a challenging (yet worthwhile) long term endeavor. The algorithm is primarily trained on single pieces of trash in the image with the trash laying on the ground and the model will likely excel for that use case currently. Additionally, user feedback has shown that the distance of trash from the camera is a critical aspect. The model performs ideally with single pieces of trash in an image less than 1 m away. The model performs less accurately on images where trash is farther away, such as when taken from a vehicle. This is likely due to the training data, TACO dataset, which consists primarily of images of trash close to the camera.
# Availability
Trash AI is hosted on the web at www.trashai.org. The source code is [available on github](https://github.com/code4sac/trash-ai) with an [MIT license](https://mit-license.org/). The source code can be run offline on any machine that can install [Docker and Docker-compose](www.docker.com). [Documentation](https://github.com/code4sac/trash-ai#ai-for-litter-detection-web-application) is maintained by Code for Sacramento and Open Fresno on Github and will be updated with each release. [Nonexhaustive instructions for AWS deployment](https://github.com/code4sac/trash-ai/blob/manuscript/docs/git-aws-account-setup.md) is available for anyone attempting production level deployment. The image datasets shared to the tool are in an S3 Bucket that needs to be reviewed before being shared with others due to security and moderation concerns but can be acquired by [contacting the repo maintaniers](https://github.com/code4sac/trash-ai/graphs/contributors).
From f53d12efa5b5c621968c45fdde94844521d6b037 Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Wed, 28 Sep 2022 22:57:09 +0200
Subject: [PATCH 34/73] Create config.yml
---
.github/ISSUE_TEMPLATE/config.yml | 1 +
1 file changed, 1 insertion(+)
create mode 100644 .github/ISSUE_TEMPLATE/config.yml
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 0000000..0086358
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1 @@
+blank_issues_enabled: true
From 6e5361b89ff0d011d8fd9b5d1516991619ea7999 Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Wed, 28 Sep 2022 22:58:22 +0200
Subject: [PATCH 35/73] Create bug.yml
---
.github/ISSUE_TEMPLATE/bug.yml | 91 ++++++++++++++++++++++++++++++++++
1 file changed, 91 insertions(+)
create mode 100644 .github/ISSUE_TEMPLATE/bug.yml
diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml
new file mode 100644
index 0000000..967a263
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug.yml
@@ -0,0 +1,91 @@
+name: Bug report
+description: Report a problem you encountered
+title: "[Bug]: "
+labels: ["bug"]
+body:
+ - type: markdown
+ attributes:
+ value: Thanks for taking the time to complete this bug report!
+ - type: checkboxes
+ id: terms
+ attributes:
+ label: Guidelines
+ description: By submitting this issue, you agree to follow our [Contributing Guidelines](https://github.com/Appsilon/.github/blob/main/CONTRIBUTING.md).
+ options:
+ - label: I agree to follow this project's Contributing Guidelines.
+ required: true
+ - type: input
+ id: project
+ attributes:
+ label: Project Version
+ description: Which project versions are affected?
+ placeholder: "1.0, 2.2, 3.5.1"
+ validations:
+ required: false
+ - type: input
+ id: platform
+ attributes:
+ label: Platform and OS Version
+ description: Which platforms or operating systems are affected?
+ placeholder: "macOS 10.15.1, Ubuntu 20.04"
+ validations:
+ required: false
+ - type: input
+ id: existing-issues
+ attributes:
+ label: Existing Issues
+ description: Are there any similar existing issues?
+ placeholder: "#42"
+ validations:
+ required: false
+ - type: textarea
+ id: what-happened
+ attributes:
+ label: What happened?
+ description: Clearly and concisely describe the bug.
+ placeholder: Tell us what happened.
+ validations:
+ required: true
+ - type: textarea
+ id: repro-steps
+ attributes:
+ label: Steps to reproduce
+ value: |
+ 1.
+ 2.
+ 3.
+ ...
+ validations:
+ required: true
+ - type: textarea
+ id: expected-behavior
+ attributes:
+ label: Expected behavior
+ description: What should have happened?
+ placeholder: What did you expect to happen?
+ validations:
+ required: true
+ - type: textarea
+ id: attachments
+ attributes:
+ label: Attachments
+ description: Please include code snippets, stack traces, or compiler errors.
+ placeholder: Paste code snippets, stack traces, and compiler errors here
+ validations:
+ required: false
+ - type: textarea
+ id: screenshots
+ attributes:
+ label: Screenshots or Videos
+ description: Add screenshots, gifs, or videos to help explain your problem.
+ placeholder: Upload screenshots, gifs, and videos here
+ validations:
+ required: false
+ - type: textarea
+ id: additional
+ attributes:
+ label: Additional Information
+ description: Add any other useful information about the problem here.
+ placeholder: Is there any additional helpful information you can share?
+ validations:
+ required: false
From d50cb69ee83a616c3a3737c0717d262c762c8568 Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Wed, 28 Sep 2022 22:58:55 +0200
Subject: [PATCH 36/73] Create feature.yml
---
.github/ISSUE_TEMPLATE/feature.yml | 48 ++++++++++++++++++++++++++++++
1 file changed, 48 insertions(+)
create mode 100644 .github/ISSUE_TEMPLATE/feature.yml
diff --git a/.github/ISSUE_TEMPLATE/feature.yml b/.github/ISSUE_TEMPLATE/feature.yml
new file mode 100644
index 0000000..f97c5d4
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature.yml
@@ -0,0 +1,48 @@
+name: Feature request
+description: Suggest an idea for this project
+title: "[Feature]: "
+labels: ["feature request", "enhancement", "feature"]
+body:
+ - type: markdown
+ attributes:
+ value: Thanks for taking the time to complete this form!
+ - type: checkboxes
+ id: terms
+ attributes:
+ label: Guidelines
+ description: By submitting this issue, you agree to follow our [Contributing Guidelines](https://github.com/Appsilon/.github/blob/main/CONTRIBUTING.md).
+ options:
+ - label: I agree to follow this project's Contributing Guidelines.
+ required: true
+ - type: textarea
+ id: description
+ attributes:
+ label: Description
+ description: Clearly and concisely describe what you would like to change, add, or implement.
+ placeholder: Tell us your idea.
+ validations:
+ required: true
+ - type: textarea
+ id: problem
+ attributes:
+ label: Problem
+ description: Is your feature request related to a problem?
+ placeholder: What problem will this solve?
+ validations:
+ required: true
+ - type: textarea
+ id: solution
+ attributes:
+ label: Proposed Solution
+ description: How should this be solved?
+ placeholder: How do you think this should be implemented?
+ validations:
+ required: true
+ - type: textarea
+ id: alternatives
+ attributes:
+ label: Alternatives Considered
+ description: Are there other possible approaches?
+ placeholder: Can you think of any other options?
+ validations:
+ required: true
From 67fc98c3a153362bb8b305c718ae5f9c7a3c5edb Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Wed, 28 Sep 2022 23:00:34 +0200
Subject: [PATCH 37/73] hyperlink
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index 889fd7e..3ddc864 100644
--- a/README.md
+++ b/README.md
@@ -9,7 +9,7 @@
### Project Summary
-Trash AI is a web application where users can upload photos of litter which will be labeled using computer vision to detect and categorize litter in the image by type. Early inspiration from (WADE AI)[https://github.com/letsdoitworld/wade-ai] streamlined this development. Trash AI will enhance the abilities of researchers to quickly label trash in photos.
+Trash AI is a web application where users can upload photos of litter which will be labeled using computer vision to detect and categorize litter in the image by type. Early inspiration from [WADE AI](https://github.com/letsdoitworld/wade-ai) streamlined this development. Trash AI will enhance the abilities of researchers to quickly label trash in photos.
#### Demo
[![image](https://user-images.githubusercontent.com/26821843/188515526-33e1196b-6830-4187-8fe4-e68b2bd4019e.png)](https://youtu.be/HHrjUpQynUM)
From ac33012c717f8a28208f8c45bff6ec35afc8f082 Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Wed, 28 Sep 2022 23:01:25 +0200
Subject: [PATCH 38/73] Update bug.yml
---
.github/ISSUE_TEMPLATE/bug.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml
index 967a263..e7eb59c 100644
--- a/.github/ISSUE_TEMPLATE/bug.yml
+++ b/.github/ISSUE_TEMPLATE/bug.yml
@@ -10,7 +10,7 @@ body:
id: terms
attributes:
label: Guidelines
- description: By submitting this issue, you agree to follow our [Contributing Guidelines](https://github.com/Appsilon/.github/blob/main/CONTRIBUTING.md).
+ description: By submitting this issue, you agree to follow our [Contributing Guidelines](https://www.contributor-covenant.org/version/2/1/code_of_conduct/).
options:
- label: I agree to follow this project's Contributing Guidelines.
required: true
From 35a35e1227210eb1b408eef1d3fabfd127b2ff5a Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Wed, 28 Sep 2022 23:01:55 +0200
Subject: [PATCH 39/73] Update feature.yml
---
.github/ISSUE_TEMPLATE/feature.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/ISSUE_TEMPLATE/feature.yml b/.github/ISSUE_TEMPLATE/feature.yml
index f97c5d4..c2b1b36 100644
--- a/.github/ISSUE_TEMPLATE/feature.yml
+++ b/.github/ISSUE_TEMPLATE/feature.yml
@@ -10,7 +10,7 @@ body:
id: terms
attributes:
label: Guidelines
- description: By submitting this issue, you agree to follow our [Contributing Guidelines](https://github.com/Appsilon/.github/blob/main/CONTRIBUTING.md).
+ description: By submitting this issue, you agree to follow our [Contributing Guidelines](https://www.contributor-covenant.org/version/2/1/code_of_conduct/).
options:
- label: I agree to follow this project's Contributing Guidelines.
required: true
From 4d30772c4cb3d5fc8a95bcfa9dbf0eaea5378daf Mon Sep 17 00:00:00 2001
From: wincowgerDEV
Date: Sun, 16 Oct 2022 13:02:50 +0200
Subject: [PATCH 40/73] Added in Kristiina and Kris's comments
---
paper.md | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/paper.md b/paper.md
index d428c1e..0e42ba1 100644
--- a/paper.md
+++ b/paper.md
@@ -46,7 +46,7 @@ Although computer vision classification routines have been created for trash, th
# Statement of need
-The trash in the environment is a widespread problem that is difficult to measure. Classical measurement techniques require surveyors with pen and paper to manually quantify every piece of trash at a site. This method is time-consuming. Scientists are actively trying to address this issue by using imaging to better understand the prevalence and distribution of trash in an `efficient yet effective manner` [@Majchrowska:2022; @Proença:2020; @Moore:2020; @vanLieshout:2020; @WADEAI:2020; @Lynch:2018; @Wuu:2018; @Waterboards:2018]. An app-based reporting of trash using cell phones, laptops, and other devices has been a `valuable solution` [@Lynch:2018]. Applications for AI in detecting trash currently include: images from `bridges` [@vanLieshout:2020], `drone imaging` [@Moore:2020], cameras on `street sweepers` [@Waterboards:2018], and cell phone app based reporting of `trash` [@Lynch:2018]. Although there are many artificial intelligence algorithms developed for trash classification, none are readily accessible to the average data scientist. The primary limitation is that artificial intelligence (AI) algorithms are primarily run through programming languages (not graphic user interfaces), difficult to deploy without AI expertise, and often live on a server (which costs money to host). New developments in browser-side AI (e.g. tensorflow.js) and serverless architecture (e.g. AWS Lambda) have created the opportunity to have browser-side artificial intelligence in a web GUI alleviating both obstacles. We present Trash AI, an open source service for making computer vision available to anyone with a web browser and images of trash.
+The trash in the environment is a widespread problem that is difficult to measure. Policy makers require high quality data on trash to create effective policies. Classical measurement techniques require surveyors with pen and paper to manually quantify every piece of trash at a site. This method is time-consuming. Scientists are actively trying to address this issue by using imaging to better understand the prevalence and distribution of trash in an `efficient yet effective manner` [@Majchrowska:2022; @Proença:2020; @Moore:2020; @vanLieshout:2020; @WADEAI:2020; @Lynch:2018; @Wuu:2018; @Waterboards:2018]. An app-based reporting of trash using cell phones, laptops, and other devices has been a `valuable solution` [@Lynch:2018]. Applications for AI in detecting trash currently include: images from `bridges` [@vanLieshout:2020], `drone imaging` [@Moore:2020], cameras on `street sweepers` [@Waterboards:2018], and cell phone app based reporting of `trash` [@Lynch:2018]. Although there are many artificial intelligence algorithms developed for trash classification, none are readily accessible to the average data scientist. The primary limitation is that artificial intelligence (AI) algorithms are primarily run through programming languages (not graphic user interfaces), difficult to deploy without AI expertise, and often live on a server (which costs money to host). New developments in browser-side AI (e.g. tensorflow.js) and serverless architecture (e.g. AWS Lambda) have created the opportunity to have browser-side artificial intelligence in a web GUI alleviating both obstacles. We present Trash AI, an open source service for making computer vision available to anyone with a web browser and images of trash.
# Demo
We have a full video tutorial on [Youtube](https://youtu.be/HHrjUpQynUM)
@@ -93,7 +93,7 @@ Trash AI is trained on the [TACO dataset](http://tacodataset.org/) using [YOLO 5
The AI model was developed starting with the TACO dataset which was available with a complimentary Jupyter Notebook on [Kaggle](https://www.kaggle.com/datasets/kneroma/tacotrashdataset). An example notebook was referenced which used the default `YOLO v5 model` [@Jocher:2020] as the basic model to begin transfer learning. Next, transfer learning was completed using the entire TACO dataset to import the image classes and annotations in the YOLO v5 model.
## Limitations
-From our experience, the accuracy of the model varies depending on the quality of the images and their context/background. Trash is a nuanced classification because the same object in different settings will not be considered trash (e.g. a drink bottle on someone's desk vs in the forest laying on the ground). This and other complexities to trash classification make a general trash AI a challenging (yet worthwhile) long term endeavor. The algorithm is primarily trained on single pieces of trash in the image with the trash laying on the ground and the model will likely excel for that use case currently.
+From our experience, the accuracy of the model varies depending on the quality of the images and their context/background. Trash is a word people use for an object that lacks purpose, and the purpose of an object is often not obvious in an image. Trash is a nuanced classification because the same object in different settings will not be considered trash (e.g., a drink bottle on someone's desk vs in the forest laying on the ground). This is the main challenge with any image-based trash detection algorithm. Not everything that LOOKS like trash IS trash. This and other complexities to trash classification make a general trash AI a challenging, yet worthwhile, long term endeavor. Additionally, the algorithm is primarily trained on single pieces of trash in the image with the trash laying on the ground and the model will likely excel for those image contexts currently.
# Availability
Trash AI is hosted on the web at www.trashai.org. The source code is [available on github](https://github.com/code4sac/trash-ai) with an [MIT license](https://mit-license.org/). The source code can be run offline on any machine that can install [Docker and Docker-compose](www.docker.com). [Documentation](https://github.com/code4sac/trash-ai#ai-for-litter-detection-web-application) is maintained by Code for Sacramento on Github and will be updated with each release. The image datasets shared to the tool are in an S3 Bucket that needs to be reviewed before being shared with others due to security and moderation concerns but can be acquired by [contacting the repo maintainers](https://github.com/code4sac/trash-ai/graphs/contributors).
From 8c4f47d2bc01cd5e6914e50e7ebd7cf8a0ac8977 Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Wed, 26 Oct 2022 11:10:49 +0200
Subject: [PATCH 41/73] Update README.md
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index 3ddc864..a64499f 100644
--- a/README.md
+++ b/README.md
@@ -9,7 +9,7 @@
### Project Summary
-Trash AI is a web application where users can upload photos of litter which will be labeled using computer vision to detect and categorize litter in the image by type. Early inspiration from [WADE AI](https://github.com/letsdoitworld/wade-ai) streamlined this development. Trash AI will enhance the abilities of researchers to quickly label trash in photos.
+Trash AI is a web application where users can upload photos of litter, which will be labeled using computer vision to detect and categorize litter in the image by type. Early inspiration from [WADE AI](https://github.com/letsdoitworld/wade-ai) streamlined this development. Trash AI will enhance the abilities of researchers to quickly label trash in photos.
#### Demo
[![image](https://user-images.githubusercontent.com/26821843/188515526-33e1196b-6830-4187-8fe4-e68b2bd4019e.png)](https://youtu.be/HHrjUpQynUM)
From 3a33857c8ff03c2b308c58b3b5a7bda3a3a79ba9 Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Mon, 31 Oct 2022 10:05:50 +0100
Subject: [PATCH 42/73]
https://github.com/code4sac/trash-ai/pull/69#discussion_r1006489065
---
paper.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/paper.md b/paper.md
index dea73a3..7a9a473 100644
--- a/paper.md
+++ b/paper.md
@@ -49,7 +49,7 @@ bibliography: paper.bib
---
# Summary
-Although computer vision classification routines have been created for trash, they have not been accessible to most researchers due to the challenges in deploying the models. Trash AI is a web GUI (Graphical User Interface) for serverless computer vision classification of batch images with trash in them hosted at www.trashai.org. With a single batch upload and download, a user can automatically describe the types and quantities of trash in all of their images.
+Although computer vision classification routines have been created for trash, they have not been accessible to most researchers due to the challenges in deploying the models. Trash AI is a web GUI (Graphical User Interface) for serverless computer vision classification of individual items of trash within images, hosted at www.trashai.org. With a single batch upload and download, a user can automatically describe the types and quantities of trash in all of their images.
# Statement of need
The trash in the environment is a widespread problem that is difficult to measure. Policy makers require high quality data on trash to create effective policies. Classical measurement techniques require surveyors with pen and paper to manually quantify every piece of trash at a site. This method is time-consuming. Scientists are actively trying to address this issue by using imaging to better understand the prevalence and distribution of trash in an `efficient yet effective manner` [@Majchrowska:2022; @Proença:2020; @Moore:2020; @vanLieshout:2020; @WADEAI:2020; @Lynch:2018; @Wuu:2018; @Waterboards:2018]. An app-based reporting of trash using cell phones, laptops, and other devices has been a `valuable solution` [@Lynch:2018]. Applications for AI in detecting trash currently include: images from `bridges` [@vanLieshout:2020], `drone imaging` [@Moore:2020], cameras on `street sweepers` [@Waterboards:2018], and cell phone app based reporting of `trash` [@Lynch:2018]. Although there are many artificial intelligence algorithms developed for trash classification, none are readily accessible to the average litter researcher. The primary limitation is that artificial intelligence (AI) algorithms are primarily run through programming languages (not graphic user interfaces), difficult to deploy without AI expertise, and often live on a server (which costs money to host). New developments in browser-side AI (e.g. tensorflow.js) and serverless architecture (e.g. AWS Lambda) have created the opportunity to have affordable browser-side artificial intelligence in a web GUI alleviating both obstacles. We present Trash AI, an open source service for making computer vision available to anyone with a web browser and images of trash.
From 01e6bad6f97ffc431a635a7883476e12524ac3f0 Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Mon, 31 Oct 2022 10:11:07 +0100
Subject: [PATCH 43/73]
https://github.com/code4sac/trash-ai/pull/69#discussion_r1006514485
---
paper.md | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/paper.md b/paper.md
index 7a9a473..11849a9 100644
--- a/paper.md
+++ b/paper.md
@@ -52,7 +52,7 @@ bibliography: paper.bib
Although computer vision classification routines have been created for trash, they have not been accessible to most researchers due to the challenges in deploying the models. Trash AI is a web GUI (Graphical User Interface) for serverless computer vision classification of individual items of trash within images, hosted at www.trashai.org. With a single batch upload and download, a user can automatically describe the types and quantities of trash in all of their images.
# Statement of need
-The trash in the environment is a widespread problem that is difficult to measure. Policy makers require high quality data on trash to create effective policies. Classical measurement techniques require surveyors with pen and paper to manually quantify every piece of trash at a site. This method is time-consuming. Scientists are actively trying to address this issue by using imaging to better understand the prevalence and distribution of trash in an `efficient yet effective manner` [@Majchrowska:2022; @Proença:2020; @Moore:2020; @vanLieshout:2020; @WADEAI:2020; @Lynch:2018; @Wuu:2018; @Waterboards:2018]. An app-based reporting of trash using cell phones, laptops, and other devices has been a `valuable solution` [@Lynch:2018]. Applications for AI in detecting trash currently include: images from `bridges` [@vanLieshout:2020], `drone imaging` [@Moore:2020], cameras on `street sweepers` [@Waterboards:2018], and cell phone app based reporting of `trash` [@Lynch:2018]. Although there are many artificial intelligence algorithms developed for trash classification, none are readily accessible to the average litter researcher. The primary limitation is that artificial intelligence (AI) algorithms are primarily run through programming languages (not graphic user interfaces), difficult to deploy without AI expertise, and often live on a server (which costs money to host). New developments in browser-side AI (e.g. tensorflow.js) and serverless architecture (e.g. AWS Lambda) have created the opportunity to have affordable browser-side artificial intelligence in a web GUI alleviating both obstacles. We present Trash AI, an open source service for making computer vision available to anyone with a web browser and images of trash.
+The trash in the environment is a widespread problem that is difficult to measure. Policy makers require high quality data on trash to create effective policies. Classical measurement techniques require surveyors with pen and paper to manually quantify every piece of trash at a site. This method is time-consuming. Scientists are actively trying to address this issue by using imaging to better understand the prevalence and distribution of trash in an `efficient yet effective manner` [@Majchrowska:2022; @Proença:2020; @Moore:2020; @vanLieshout:2020; @WADEAI:2020; @Lynch:2018; @Wuu:2018; @Waterboards:2018]. An app-based reporting of trash using cell phones, laptops, and other devices has been a `valuable solution` [@Lynch:2018]. Applications for AI in detecting trash currently include: images from `bridges` [@vanLieshout:2020], `drone imaging` [@Moore:2020], cameras on `street sweepers` [@Waterboards:2018], and cell phone app based reporting of `trash` [@Lynch:2018]. Although there are many artificial intelligence algorithms developed for trash classification, none are readily accessible to the average litter researcher. The primary limitation is that artificial intelligence (AI) algorithms are primarily run through programming languages (not graphic user interfaces), difficult to deploy without AI expertise, and often live on a server (which costs money to host). New developments in browser-side AI (e.g., tensorflow.js) and serverless architecture (e.g., AWS Lambda) have created the opportunity to have affordable browser-side artificial intelligence in a web GUI, alleviating both obstacles. We present Trash AI, an open source service for making computer vision available to anyone with a web browser and images of trash.
# Demo
We have a full video tutorial on [Youtube](https://youtu.be/HHrjUpQynUM)
@@ -99,7 +99,7 @@ Trash AI is trained on the [TACO dataset](http://tacodataset.org/) using [YOLO 5
The AI model was developed starting with the TACO dataset which was available with a complimentary Jupyter Notebook on [Kaggle](https://www.kaggle.com/datasets/kneroma/tacotrashdataset). An example notebook was referenced which used the default `YOLO v5 model` [@Jocher:2020] as the basic model to begin transfer learning. Next, transfer learning was completed using the entire TACO dataset to import the image classes and annotations in the YOLO v5 model.
## Limitations
-From our experience, the accuracy of the model varies depending on the quality of the images and their context/background. Trash is a word people use for an object that lacks purpose, and the purpose of an object is often not obvious in an image. Trash is a nuanced classification because the same object in different settings will not be considered trash (e.g. a drink bottle on someone's desk vs in the forest laying on the ground). This is the main challenge with any image-based trash detection algorithm. Not everything that LOOKS like trash IS trash. This and other complexities to trash classification make a general trash AI a challenging (yet worthwhile) long term endeavor. The algorithm is primarily trained on single pieces of trash in the image with the trash laying on the ground and the model will likely excel for that use case currently. Additionally, user feedback has shows that the distance of trash from the camera is a critical aspect. The model performs ideally with single pieces of trash in an image less than 1 m away. The model performs less accurately on images when trash which is farther away such as when taken from a vehicle. This is likely due to the training data, TACO dataset, which consists primarily of images of trash close to the camera.
+From our experience, the accuracy of the model varies depending on the quality of the images and their context/background. Trash is a word people use for an object that lacks purpose, and the purpose of an object is often not obvious in an image. Trash is a nuanced classification because the same object in different settings will not be considered trash (e.g., a drink bottle on someone's desk vs in the forest lying on the ground). This is the main challenge with any image-based trash detection algorithm. Not everything that LOOKS like trash IS trash. This and other complexities to trash classification make a general trash AI a challenging (yet worthwhile) long term endeavor. The algorithm is primarily trained on single pieces of trash in the image with the trash lying on the ground and the model will likely excel for that use case currently. Additionally, user feedback has shown that the distance of trash from the camera is a critical aspect. The model performs ideally with single pieces of trash in an image less than 1 m away. The model performs less accurately on images where the trash is farther away, such as when taken from a vehicle. This is likely due to the training data, TACO dataset, which consists primarily of images of trash close to the camera.
# Availability
Trash AI is hosted on the web at www.trashai.org. The source code is [available on github](https://github.com/code4sac/trash-ai) with an [MIT license](https://mit-license.org/). The source code can be run offline on any machine that can install [Docker and Docker-compose](www.docker.com). [Documentation](https://github.com/code4sac/trash-ai#ai-for-litter-detection-web-application) is maintained by Code for Sacramento and Open Fresno on Github and will be updated with each release. [Non-exhaustive instructions for AWS deployment](https://github.com/code4sac/trash-ai/blob/manuscript/docs/git-aws-account-setup.md) are available for anyone attempting production level deployment. The image datasets shared to the tool are in an S3 Bucket that needs to be reviewed before being shared with others due to security and moderation concerns but can be acquired by [contacting the repo maintainers](https://github.com/code4sac/trash-ai/graphs/contributors).
From 14eb61cb7ef35ff83a27bfc045718aef1ecbb60e Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Mon, 31 Oct 2022 10:12:21 +0100
Subject: [PATCH 44/73]
https://github.com/code4sac/trash-ai/pull/69#discussion_r1006517045
---
paper.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/paper.md b/paper.md
index 11849a9..b34ec99 100644
--- a/paper.md
+++ b/paper.md
@@ -93,7 +93,7 @@ We have a full video tutorial on [Youtube](https://youtu.be/HHrjUpQynUM)
# Method
## Workflow Overview
-Trash AI is trained on the [TACO dataset](http://tacodataset.org/) using [YOLO 5](pytorch.org). Trash AI stores images in [IndexDB](https://developer.mozilla.org/en-US/docs/Web/API/IndexedDB_API) to keep the data primarily browser side and uses [tensorflow.js](https://www.tensorflow.org/js) to keep analysis browser side too. When images are uploaded to the browser, Trash AI provides the prediction of the model as a graphical output. The raw data from the model and labeled images can be downloaded in a batch download to expedite analyses. Any data uploaded to the platform may be automatically saved to an [S3 bucket](https://aws.amazon.com/s3/) which we can use to improve the model over time.
+Trash AI is trained on the [TACO dataset](http://tacodataset.org/) using [YOLO 5](pytorch.org). Trash AI stores images in [IndexDB](https://developer.mozilla.org/en-US/docs/Web/API/IndexedDB_API) to keep the data primarily browser side and uses [tensorflow.js](https://www.tensorflow.org/js) to keep analysis browser side too. When images are uploaded to the browser, Trash AI provides the prediction of the model as a graphical output. The raw data from the model and labeled images can be downloaded in a batch download to expedite analyses. Any data uploaded to the platform may be automatically saved to an [S3 bucket](https://aws.amazon.com/s3/), which we can use to improve the model over time.
## AI Training
The AI model was developed starting with the TACO dataset which was available with a complimentary Jupyter Notebook on [Kaggle](https://www.kaggle.com/datasets/kneroma/tacotrashdataset). An example notebook was referenced which used the default `YOLO v5 model` [@Jocher:2020] as the basic model to begin transfer learning. Next, transfer learning was completed using the entire TACO dataset to import the image classes and annotations in the YOLO v5 model.
From 1654df8a4e2224fa744d99d8fcafd30b2587e773 Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Mon, 31 Oct 2022 10:14:40 +0100
Subject: [PATCH 45/73]
https://github.com/code4sac/trash-ai/pull/69#discussion_r1006519572
---
paper.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/paper.md b/paper.md
index b34ec99..5389d4e 100644
--- a/paper.md
+++ b/paper.md
@@ -96,7 +96,7 @@ We have a full video tutorial on [Youtube](https://youtu.be/HHrjUpQynUM)
Trash AI is trained on the [TACO dataset](http://tacodataset.org/) using [YOLO 5](pytorch.org). Trash AI stores images in [IndexDB](https://developer.mozilla.org/en-US/docs/Web/API/IndexedDB_API) to keep the data primarily browser side and uses [tensorflow.js](https://www.tensorflow.org/js) to keep analysis browser side too. When images are uploaded to the browser, Trash AI provides the prediction of the model as a graphical output. The raw data from the model and labeled images can be downloaded in a batch download to expedite analyses. Any data uploaded to the platform may be automatically saved to an [S3 bucket](https://aws.amazon.com/s3/), which we can use to improve the model over time.
## AI Training
-The AI model was developed starting with the TACO dataset which was available with a complimentary Jupyter Notebook on [Kaggle](https://www.kaggle.com/datasets/kneroma/tacotrashdataset). An example notebook was referenced which used the default `YOLO v5 model` [@Jocher:2020] as the basic model to begin transfer learning. Next, transfer learning was completed using the entire TACO dataset to import the image classes and annotations in the YOLO v5 model.
+The AI model was developed starting with the TACO dataset, which was available with a complimentary Jupyter Notebook on [Kaggle](https://www.kaggle.com/datasets/kneroma/tacotrashdataset). An example notebook was referenced, which used the default `YOLO v5 model` [@Jocher:2020] as the basic model to begin transfer learning. Next, transfer learning was completed using the entire TACO dataset to import the image classes and annotations in the YOLO v5 model.
## Limitations
From our experience, the accuracy of the model varies depending on the quality of the images and their context/background. Trash is a word people use for an object that lacks purpose, and the purpose of an object is often not obvious in an image. Trash is a nuanced classification because the same object in different settings will not be considered trash (e.g., a drink bottle on someone's desk vs in the forest laying on the ground). This is the main challenge with any image-based trash detection algorithm. Not everything that LOOKS like trash IS trash. This and other complexities to trash classification make a general trash AI a challenging (yet worthwhile) long term endeavor. The algorithm is primarily trained on single pieces of trash in the image with the trash laying on the ground and the model will likely excel for that use case currently. Additionally, user feedback has shows that the distance of trash from the camera is a critical aspect. The model performs ideally with single pieces of trash in an image less than 1 m away. The model performs less accurately on images when trash which is farther away such as when taken from a vehicle. This is likely due to the training data, TACO dataset, which consists primarily of images of trash close to the camera.
From 0b4743ed252ad7f7351950aecef4c1c77e8e7aa0 Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Mon, 31 Oct 2022 10:19:33 +0100
Subject: [PATCH 46/73]
https://github.com/code4sac/trash-ai/pull/69#discussion_r1006536610
---
paper.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/paper.md b/paper.md
index 5389d4e..cffa143 100644
--- a/paper.md
+++ b/paper.md
@@ -99,7 +99,7 @@ Trash AI is trained on the [TACO dataset](http://tacodataset.org/) using [YOLO 5
The AI model was developed starting with the TACO dataset, which was available with a complimentary Jupyter Notebook on [Kaggle](https://www.kaggle.com/datasets/kneroma/tacotrashdataset). An example notebook was referenced, which used the default `YOLO v5 model` [@Jocher:2020] as the basic model to begin transfer learning. Next, transfer learning was completed using the entire TACO dataset to import the image classes and annotations in the YOLO v5 model.
## Limitations
-From our experience, the accuracy of the model varies depending on the quality of the images and their context/background. Trash is a word people use for an object that lacks purpose, and the purpose of an object is often not obvious in an image. Trash is a nuanced classification because the same object in different settings will not be considered trash (e.g., a drink bottle on someone's desk vs in the forest laying on the ground). This is the main challenge with any image-based trash detection algorithm. Not everything that LOOKS like trash IS trash. This and other complexities to trash classification make a general trash AI a challenging (yet worthwhile) long term endeavor. The algorithm is primarily trained on single pieces of trash in the image with the trash laying on the ground and the model will likely excel for that use case currently. Additionally, user feedback has shows that the distance of trash from the camera is a critical aspect. The model performs ideally with single pieces of trash in an image less than 1 m away. The model performs less accurately on images when trash which is farther away such as when taken from a vehicle. This is likely due to the training data, TACO dataset, which consists primarily of images of trash close to the camera.
+From our experience, the accuracy of the model varies depending on the quality of the images and their context/background. Trash is a word people use for an object that lacks purpose, and the purpose of an object is often not obvious in an image. Trash is a nuanced classification because the same object in different settings will not be considered trash (e.g., a drink bottle on someone's desk vs in the forest laying on the ground). This is the main challenge with any image-based trash detection algorithm. Not everything that LOOKS like trash IS trash. This and other complexities to trash classification make a general trash AI a challenging (yet worthwhile) long-term endeavor. The algorithm is primarily trained on single pieces of trash in the image, with the trash laying on the ground. Thus, model class prediction of trash in these kinds of images will generally be better than trash appearing in aerial images, for example. Additionally, user feedback has shows that the distance of trash from the camera is a critical aspect. The model performs ideally with single pieces of trash in an image less than 1 m away. The model performs less accurately on images when trash which is farther away such as when taken from a vehicle. This is likely due to the training data, TACO dataset, which consists primarily of images of trash close to the camera.
# Availability
Trash AI is hosted on the web at www.trashai.org. The source code is [available on github](https://github.com/code4sac/trash-ai) with an [MIT license](https://mit-license.org/). The source code can be run offline on any machine that can install [Docker and Docker-compose](www.docker.com). [Documentation](https://github.com/code4sac/trash-ai#ai-for-litter-detection-web-application) is maintained by Code for Sacramento and Open Fresno on Github and will be updated with each release. [Nonexhaustive instructions for AWS deployment](https://github.com/code4sac/trash-ai/blob/manuscript/docs/git-aws-account-setup.md) is available for anyone attempting production level deployment. The image datasets shared to the tool are in an S3 Bucket that needs to be reviewed before being shared with others due to security and moderation concerns but can be acquired by [contacting the repo maintaniers](https://github.com/code4sac/trash-ai/graphs/contributors).
From 21ad88f5d61a83cf08310bd2b93712986c5b13a9 Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Mon, 31 Oct 2022 10:26:26 +0100
Subject: [PATCH 47/73]
https://github.com/code4sac/trash-ai/pull/69#discussion_r1007668618
---
paper.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/paper.md b/paper.md
index cffa143..eb62968 100644
--- a/paper.md
+++ b/paper.md
@@ -108,6 +108,6 @@ Trash AI is hosted on the web at www.trashai.org. The source code is [available
This workflow is likely to be highly useful for a wide variety of computer vision applications and we hope that people reuse the code for applications beyond trash detection. We aim to increase the labeling of images by creating a user interface that allows users to improve the annotations that the model is currently predicting by manually restructuring the bounding boxes and relabeling the classes. We aim to work in collaboration with the TACO development team to improve our workflow integration to get the data that people share to our S3 bucket into the [TACO training dataset](http://tacodataset.org/) and trained model. Future models will expand the annotations to include the `Trash Taxonomy` [@Hapich:2022] classes and add an option to choose between other models besides the current model.
# Acknowledgements
-Code for Sacramento and Open Fresno led the development of the software tool. The Moore Institute advised on priorities and led the drafting of this manuscript. Let's Do It Foundation assisted with original products leading up to trash AI in the development of WADE AI. We acknowledge the work of the Code for Sacramento and Open Fresno team, brigades of code for America, without whom this project would not have been possible and acknowledge the input of the California Water Monitoring Council Trash Monitoring Workgroup. In particular we would like to acknowledge Joseph Falkner, Democracy Lab, Brad Anderson, Jim Ewald, Don Brower, University of Houston. We acknowledge financial support from McPike Zima Charitable Foundation.
+Code for Sacramento and Open Fresno led the development of the software tool. The Moore Institute advised on priorities and led the drafting of this manuscript. Let's Do It Foundation assisted with original products leading up to trash AI in the development of WADE AI. We acknowledge the work of the Code for Sacramento and Open Fresno team, brigades of Code for America, without whom this project would not have been possible, and acknowledge the input of the California Water Monitoring Council Trash Monitoring Workgroup. In particular we would like to acknowledge Joseph Falkner, Democracy Lab, Brad Anderson, Jim Ewald, Don Brower, and the University of Houston. We acknowledge financial support from McPike Zima Charitable Foundation.
# References
From 83e1fd4fd0f8f1379ad3d26a37ed4c7f03ca6cd7 Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Fri, 18 Nov 2022 19:44:04 +0100
Subject: [PATCH 48/73] add kris's comments
---
docs/localdev.md | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/docs/localdev.md b/docs/localdev.md
index a75ff6d..171295c 100644
--- a/docs/localdev.md
+++ b/docs/localdev.md
@@ -2,7 +2,7 @@
When doing local development, you can run this stack in the background and
edit files in the `/backend` and `/frontend` directories and the environment
-with automatically update.
+with automatic update.
The listening port for the web frontend defaults to `http://localhost:5150`,
The backend is exposed via `http://localhost:4000` by default.
@@ -13,8 +13,9 @@ These values can be adjusted by editing the localdev env file [.env](../localdev
## _*IMPORTANT*_
-It's suggested you work in branch `local` when developing
+It's suggested you work in branch `local` by creating your own local branch when developing
Pushing / merging PR's to any branches with a prefix of `aws/` will trigger deployment actions
+For full functionality you will want to get a Google Maps API key and name it VITE_GOOGLE_MAPS_API_KEY, but it is not required
---
From 29d22657753d847725e031f14de85d1e84a27acf Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Fri, 18 Nov 2022 19:45:33 +0100
Subject: [PATCH 49/73] add WSL2 link
---
docs/localdev.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/localdev.md b/docs/localdev.md
index 171295c..f9a355f 100644
--- a/docs/localdev.md
+++ b/docs/localdev.md
@@ -23,7 +23,7 @@ For full functionality you will want to get a Google Maps API key and name it VI
- Linux
- MacOS (testing)
-- Windows using WSL2
+- Windows using [WSL2](https://learn.microsoft.com/en-us/windows/wsl/install)
- WSL Integration on Ubuntu must be enabled in Docker options -> Resources -> WSL Integration
- Repo must be inside Ubuntu (i.e. ~/code/trash-ai)
- Make local must be run from a WSL (Ubuntu) terminal
From 9633408f2d4f836351313ef2c8e8f30424616586 Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Fri, 18 Nov 2022 22:17:32 +0100
Subject: [PATCH 50/73] Update paper.md
---
paper.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/paper.md b/paper.md
index eb62968..13c84ea 100644
--- a/paper.md
+++ b/paper.md
@@ -108,6 +108,6 @@ Trash AI is hosted on the web at www.trashai.org. The source code is [available
This workflow is likely to be highly useful for a wide variety of computer vision applications and we hope that people reuse the code for applications beyond trash detection. We aim to increase the labeling of images by creating a user interface that allows users to improve the annotations that the model is currently predicting by manually restructuring the bounding boxes and relabeling the classes. We aim to work in collaboration with the TACO development team to improve our workflow integration to get the data that people share to our S3 bucket into the [TACO training dataset](http://tacodataset.org/) and trained model. Future models will expand the annotations to include the `Trash Taxonomy` [@Hapich:2022] classes and add an option to choose between other models besides the current model.
# Acknowledgements
-Code for Sacramento and Open Fresno led the development of the software tool. The Moore Institute advised on priorities and led the drafting of this manuscript. Let's Do It Foundation assisted with original products leading up to trash AI in the development of WADE AI. We acknowledge the work of the Code for Sacramento and Open Fresno team, brigades of Code for America, without whom this project would not have been possible, and acknowledge the input of the California Water Monitoring Council Trash Monitoring Workgroup. In particular we would like to acknowledge Joseph Falkner, Democracy Lab, Brad Anderson, Jim Ewald, Don Brower, and the University of Houston. We acknowledge financial support from McPike Zima Charitable Foundation.
+Code for Sacramento and Open Fresno led the development of the software tool. The Moore Institute advised on priorities and led the drafting of this manuscript. Let's Do It Foundation assisted with original products leading up to trash AI in the development of WADE AI. We acknowledge the work of the Code for Sacramento and Open Fresno team, brigades of Code for America, without whom this project would not have been possible, and acknowledge the input of the California Water Monitoring Council Trash Monitoring Workgroup. In particular we would like to acknowledge J.Z. Zhang, Joseph Falkner, Democracy Lab, Brad Anderson, Jim Ewald, Don Brower, and the University of Houston. We acknowledge financial support from McPike Zima Charitable Foundation.
# References
From 327bf7558d4ca21e302007eb67290bac4bd0e17c Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Fri, 18 Nov 2022 22:24:01 +0100
Subject: [PATCH 51/73] add acknowledgements
---
paper.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/paper.md b/paper.md
index 13c84ea..3dc63e7 100644
--- a/paper.md
+++ b/paper.md
@@ -108,6 +108,6 @@ Trash AI is hosted on the web at www.trashai.org. The source code is [available
This workflow is likely to be highly useful for a wide variety of computer vision applications and we hope that people reuse the code for applications beyond trash detection. We aim to increase the labeling of images by creating a user interface that allows users to improve the annotations that the model is currently predicting by manually restructuring the bounding boxes and relabeling the classes. We aim to work in collaboration with the TACO development team to improve our workflow integration to get the data that people share to our S3 bucket into the [TACO training dataset](http://tacodataset.org/) and trained model. Future models will expand the annotations to include the `Trash Taxonomy` [@Hapich:2022] classes and add an option to choose between other models besides the current model.
# Acknowledgements
-Code for Sacramento and Open Fresno led the development of the software tool. The Moore Institute advised on priorities and led the drafting of this manuscript. Let's Do It Foundation assisted with original products leading up to trash AI in the development of WADE AI. We acknowledge the work of the Code for Sacramento and Open Fresno team, brigades of Code for America, without whom this project would not have been possible, and acknowledge the input of the California Water Monitoring Council Trash Monitoring Workgroup. In particular we would like to acknowledge J.Z. Zhang, Joseph Falkner, Democracy Lab, Brad Anderson, Jim Ewald, Don Brower, and the University of Houston. We acknowledge financial support from McPike Zima Charitable Foundation.
+Code for Sacramento and Open Fresno led the development of the software tool. The Moore Institute advised on priorities and led the drafting of this manuscript. Let's Do It Foundation assisted with original products leading up to trash AI in the development of WADE AI. We acknowledge the work of the Code for Sacramento and Open Fresno team, brigades of Code for America, without whom this project would not have been possible, and acknowledge the input of the California Water Monitoring Council Trash Monitoring Workgroup. In particular we would like to acknowledge Kevin Fries, J.Z. Zhang, Joseph Falkner, Democracy Lab, Brad Anderson, Jim Ewald, Don Brower, and the University of Houston. We acknowledge financial support from McPike Zima Charitable Foundation.
# References
From 640e465f6b904b30308889c2a8fa56228e9c5fd0 Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Mon, 28 Nov 2022 09:10:03 +0100
Subject: [PATCH 52/73]
https://github.com/code4sac/trash-ai/pull/69#discussion_r1033145379
---
paper.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/paper.md b/paper.md
index 3dc63e7..c5419c0 100644
--- a/paper.md
+++ b/paper.md
@@ -52,7 +52,7 @@ bibliography: paper.bib
Although computer vision classification routines have been created for trash, they have not been accessible to most researchers due to the challenges in deploying the models. Trash AI is a web GUI (Graphical User Interface) for serverless computer vision classification of individual items of trash within images, hosted at www.trashai.org. With a single batch upload and download, a user can automatically describe the types and quantities of trash in all of their images.
# Statement of need
-The trash in the environment is a widespread problem that is difficult to measure. Policy makers require high quality data on trash to create effective policies. Classical measurement techniques require surveyors with pen and paper to manually quantify every piece of trash at a site. This method is time-consuming. Scientists are actively trying to address this issue by using imaging to better understand the prevalence and distribution of trash in an `efficient yet effective manner` [@Majchrowska:2022; @Proença:2020; @Moore:2020; @vanLieshout:2020; @WADEAI:2020; @Lynch:2018; @Wuu:2018; @Waterboards:2018]. An app-based reporting of trash using cell phones, laptops, and other devices has been a `valuable solution` [@Lynch:2018]. Applications for AI in detecting trash currently include: images from `bridges` [@vanLieshout:2020], `drone imaging` [@Moore:2020], cameras on `street sweepers` [@Waterboards:2018], and cell phone app based reporting of `trash` [@Lynch:2018]. Although there are many artificial intelligence algorithms developed for trash classification, none are readily accessible to the average litter researcher. The primary limitation is that artificial intelligence (AI) algorithms are primarily run through programming languages (not graphic user interfaces), difficult to deploy without AI expertise, and often live on a server (which costs money to host). New developments in browser-side AI (e.g., tensorflow.js) and serverless architecture (e.g., AWS Lambda) have created the opportunity to have affordable browser-side artificial intelligence in a web GUI, alleviating both obstacles. We present Trash AI, an open source service for making computer vision available to anyone with a web browser and images of trash.
+Trash in the environment is a widespread problem that is difficult to measure. Policy makers require high quality data on trash to create effective policies. Classical measurement techniques require surveyors with pen and paper to manually quantify every piece of trash at a site. This method is time-consuming. Scientists are actively trying to address this issue by using imaging to better understand the prevalence and distribution of trash in an `efficient yet effective manner` [@Majchrowska:2022; @Proença:2020; @Moore:2020; @vanLieshout:2020; @WADEAI:2020; @Lynch:2018; @Wuu:2018; @Waterboards:2018]. An app-based reporting of trash using cell phones, laptops, and other devices has been a `valuable solution` [@Lynch:2018]. Applications for AI in detecting trash currently include: images from `bridges` [@vanLieshout:2020], `drone imaging` [@Moore:2020], cameras on `street sweepers` [@Waterboards:2018], and cell phone app based reporting of `trash` [@Lynch:2018]. Although there are many artificial intelligence algorithms developed for trash classification, none are readily accessible to the average litter researcher. The primary limitation is that artificial intelligence (AI) algorithms are primarily run through programming languages (not graphic user interfaces), difficult to deploy without AI expertise, and often live on a server (which costs money to host). New developments in browser-side AI (e.g., tensorflow.js) and serverless architecture (e.g., AWS Lambda) have created the opportunity to have affordable browser-side artificial intelligence in a web GUI, alleviating both obstacles. We present Trash AI, an open source service for making computer vision available to anyone with a web browser and images of trash.
# Demo
We have a full video tutorial on [Youtube](https://youtu.be/HHrjUpQynUM)
From 3273b243ce38d7909f7aa8768abf9b017109c3ad Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Mon, 28 Nov 2022 09:11:41 +0100
Subject: [PATCH 53/73]
https://github.com/code4sac/trash-ai/pull/69#discussion_r1033146187
---
paper.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/paper.md b/paper.md
index c5419c0..4821015 100644
--- a/paper.md
+++ b/paper.md
@@ -99,7 +99,7 @@ Trash AI is trained on the [TACO dataset](http://tacodataset.org/) using [YOLO 5
The AI model was developed starting with the TACO dataset, which was available with a complimentary Jupyter Notebook on [Kaggle](https://www.kaggle.com/datasets/kneroma/tacotrashdataset). An example notebook was referenced, which used the default `YOLO v5 model` [@Jocher:2020] as the basic model to begin transfer learning. Next, transfer learning was completed using the entire TACO dataset to import the image classes and annotations in the YOLO v5 model.
## Limitations
-From our experience, the accuracy of the model varies depending on the quality of the images and their context/background. Trash is a word people use for an object that lacks purpose, and the purpose of an object is often not obvious in an image. Trash is a nuanced classification because the same object in different settings will not be considered trash (e.g., a drink bottle on someone's desk vs in the forest laying on the ground). This is the main challenge with any image-based trash detection algorithm. Not everything that LOOKS like trash IS trash. This and other complexities to trash classification make a general trash AI a challenging (yet worthwhile) long-term endeavor. The algorithm is primarily trained on single pieces of trash in the image, with the trash laying on the ground. Thus, model class prediction of trash in these kinds of images will generally be better than trash appearing in aerial images, for example. Additionally, user feedback has shows that the distance of trash from the camera is a critical aspect. The model performs ideally with single pieces of trash in an image less than 1 m away. The model performs less accurately on images when trash which is farther away such as when taken from a vehicle. This is likely due to the training data, TACO dataset, which consists primarily of images of trash close to the camera.
+From our experience, the accuracy of the model varies depending on the quality of the images and their context/background. "Trash" is a word people use for an object that lacks purpose, and the purpose of an object is often not obvious in an image. Trash is a nuanced classification because the same object in different settings will not be considered trash (e.g., a drink bottle on someone's desk vs in the forest laying on the ground). This is the main challenge with any image-based trash detection algorithm. Not everything that LOOKS like trash IS trash. This and other complexities to trash classification make a general trash AI a challenging (yet worthwhile) long-term endeavor. The algorithm is primarily trained on single pieces of trash in the image, with the trash laying on the ground. Thus, model class prediction of trash in these kinds of images will generally be better than trash appearing in aerial images, for example. Additionally, user feedback has shown that the distance of trash from the camera is a critical aspect. The model performs ideally with single pieces of trash in an image less than 1 m away. The model performs less accurately on images where trash is farther away, such as when taken from a vehicle. This is likely due to the training data, the TACO dataset, which consists primarily of images of trash close to the camera.
# Availability
Trash AI is hosted on the web at www.trashai.org. The source code is [available on github](https://github.com/code4sac/trash-ai) with an [MIT license](https://mit-license.org/). The source code can be run offline on any machine that can install [Docker and Docker-compose](www.docker.com). [Documentation](https://github.com/code4sac/trash-ai#ai-for-litter-detection-web-application) is maintained by Code for Sacramento and Open Fresno on Github and will be updated with each release. [Nonexhaustive instructions for AWS deployment](https://github.com/code4sac/trash-ai/blob/manuscript/docs/git-aws-account-setup.md) are available for anyone attempting production level deployment. The image datasets shared to the tool are in an S3 Bucket that needs to be reviewed before being shared with others due to security and moderation concerns but can be acquired by [contacting the repo maintainers](https://github.com/code4sac/trash-ai/graphs/contributors).
From a582b32e159125913a9b0c0ab7bc996c04c9baa5 Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Mon, 28 Nov 2022 09:13:55 +0100
Subject: [PATCH 54/73]
https://github.com/code4sac/trash-ai/pull/69#discussion_r1024726101
---
paper.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/paper.md b/paper.md
index 4821015..57d97f6 100644
--- a/paper.md
+++ b/paper.md
@@ -108,6 +108,6 @@ Trash AI is hosted on the web at www.trashai.org. The source code is [available
This workflow is likely to be highly useful for a wide variety of computer vision applications and we hope that people reuse the code for applications beyond trash detection. We aim to increase the labeling of images by creating a user interface that allows users to improve the annotations that the model is currently predicting by manually restructuring the bounding boxes and relabeling the classes. We aim to work in collaboration with the TACO development team to improve our workflow integration to get the data that people share to our S3 bucket into the [TACO training dataset](http://tacodataset.org/) and trained model. Future models will expand the annotations to include the `Trash Taxonomy` [@Hapich:2022] classes and add an option to choose between other models besides the current model.
# Acknowledgements
-Code for Sacramento and Open Fresno led the development of the software tool. The Moore Institute advised on priorities and led the drafting of this manuscript. Let's Do It Foundation assisted with original products leading up to trash AI in the development of WADE AI. We acknowledge the work of the Code for Sacramento and Open Fresno team, brigades of Code for America, without whom this project would not have been possible, and acknowledge the input of the California Water Monitoring Council Trash Monitoring Workgroup. In particular we would like to acknowledge Kevin Fries, J.Z. Zhang, Joseph Falkner, Democracy Lab, Brad Anderson, Jim Ewald, Don Brower, and the University of Houston. We acknowledge financial support from McPike Zima Charitable Foundation.
+Code for Sacramento and Open Fresno led the development of the software tool. The Moore Institute advised on priorities and led the drafting of this manuscript. Let's Do It Foundation assisted with original products leading up to trash AI in the development of WADE AI. We acknowledge the work of the Code for Sacramento and Open Fresno team, brigades of Code for America, without whom this project would not have been possible, and acknowledge the input of the California Water Monitoring Council Trash Monitoring Workgroup. In particular, we would like to acknowledge Kevin Fries, J.Z. Zhang, Joseph Falkner, Democracy Lab, Brad Anderson, Jim Ewald, Don Brower, and University of Houston. We acknowledge financial support from McPike Zima Charitable Foundation.
# References
From 81454c0e1ebea879dec5444a9ccfe9f5b7c324fd Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Fri, 30 Dec 2022 18:03:35 -0800
Subject: [PATCH 55/73] add submitted badge
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index a64499f..7732668 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
# Trash AI: Web application for serverless image classification of trash
[![Website](https://img.shields.io/badge/Web-TrashAI.org-blue)](https://www.trashai.org)
-
+[![status](https://joss.theoj.org/papers/6ffbb0f89e6c928dad6908a02639789b/status.svg)](https://joss.theoj.org/papers/6ffbb0f89e6c928dad6908a02639789b)
### Project Information
From 52380b0a1915526db7c79b5cb44daad02b525a4c Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Fri, 30 Dec 2022 18:08:18 -0800
Subject: [PATCH 56/73] update dois
---
paper.bib | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/paper.bib b/paper.bib
index 562d126..585e7b8 100644
--- a/paper.bib
+++ b/paper.bib
@@ -88,7 +88,8 @@ @ARTICLE{Hapich:2022
number = 1,
pages = "15",
month = jun,
- year = 2022
+ year = 2022,
+ doi = 10.1186/s43591-022-00035-1
}
@misc{Waterboards:2018,
@@ -108,7 +109,7 @@ @article{vanLieshout:2020
number = {8},
pages = {e2019EA000960},
keywords = {plastic pollution, object detection, automated monitoring, deep learning, artificial intelligence, river plastic},
- doi = {https://doi.org/10.1029/2019EA000960},
+ doi = {10.1029/2019EA000960},
url = {https://agupubs.onlinelibrary.wiley.com/doi/abs/10.1029/2019EA000960},
eprint = {https://agupubs.onlinelibrary.wiley.com/doi/pdf/10.1029/2019EA000960},
note = {e2019EA000960 10.1029/2019EA000960},
@@ -145,7 +146,8 @@ @ARTICLE{Lynch:2018
number = 1,
pages = "6",
month = jun,
- year = 2018
+ year = 2018,
+ doi = 10.1186/s40965-018-0050-y
}
@@ -176,7 +178,7 @@ @article{Majchrowska:2022
pages = {274-284},
year = {2022},
issn = {0956-053X},
-doi = {https://doi.org/10.1016/j.wasman.2021.12.001},
+doi = {10.1016/j.wasman.2021.12.001},
url = {https://www.sciencedirect.com/science/article/pii/S0956053X21006474},
author = {Sylwia Majchrowska and Agnieszka Mikołajczyk and Maria Ferlin and Zuzanna Klawikowska and Marta A. Plantykow and Arkadiusz Kwasigroch and Karol Majek},
keywords = {Object detection, Semi-supervised learning, Waste classification benchmarks, Waste detection benchmarks, Waste localization, Waste recognition},
From 2527ec10682118e6ba60a4dba2026e0460e82b73 Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Fri, 12 May 2023 08:51:03 -0700
Subject: [PATCH 57/73] Update paper.md
https://github.com/code4sac/trash-ai/issues/122
---
paper.md | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/paper.md b/paper.md
index 57d97f6..ff4cf19 100644
--- a/paper.md
+++ b/paper.md
@@ -93,7 +93,7 @@ We have a full video tutorial on [Youtube](https://youtu.be/HHrjUpQynUM)
# Method
## Workflow Overview
-Trash AI is trained on the [TACO dataset](http://tacodataset.org/) using [YOLO 5](pytorch.org). Trash AI stores images in [IndexDB](https://developer.mozilla.org/en-US/docs/Web/API/IndexedDB_API) to keep the data primarily browser side and uses [tensorflow.js](https://www.tensorflow.org/js) to keep analysis browser side too. When images are uploaded to the browser, Trash AI provides the prediction of the model as a graphical output. The raw data from the model and labeled images can be downloaded in a batch download to expedite analyses. Any data uploaded to the platform may be automatically saved to an [S3 bucket](https://aws.amazon.com/s3/), which we can use to improve the model over time.
+Trash AI is trained on the [TACO dataset](http://tacodataset.org/) using [YOLO 5](pytorch.org). Trash AI stores images in [IndexDB](https://developer.mozilla.org/en-US/docs/Web/API/IndexedDB_API) to keep the data primarily browser side and uses [tensorflow.js](https://www.tensorflow.org/js) to keep analysis browser side too. When images are uploaded to the browser, Trash AI provides the prediction of the model as a graphical output. The raw data from the model and labeled images can be downloaded in a batch download to expedite analyses.
## AI Training
The AI model was developed starting with the TACO dataset, which was available with a complimentary Jupyter Notebook on [Kaggle](https://www.kaggle.com/datasets/kneroma/tacotrashdataset). An example notebook was referenced, which used the default `YOLO v5 model` [@Jocher:2020] as the basic model to begin transfer learning. Next, transfer learning was completed using the entire TACO dataset to import the image classes and annotations in the YOLO v5 model.
@@ -102,10 +102,10 @@ The AI model was developed starting with the TACO dataset, which was available w
From our experience, the accuracy of the model varies depending on the quality of the images and their context/background. "Trash" is a word people use for an object that lacks purpose, and the purpose of an object is often not obvious in an image. Trash is a nuanced classification because the same object in different settings will not be considered trash (e.g., a drink bottle on someone's desk vs in the forest laying on the ground). This is the main challenge with any image-based trash detection algorithm. Not everything that LOOKS like trash IS trash. This and other complexities to trash classification make a general trash AI a challenging (yet worthwhile) long-term endeavor. The algorithm is primarily trained on single pieces of trash in the image, with the trash laying on the ground. Thus, model class prediction of trash in these kinds of images will generally be better than trash appearing in aerial images, for example. Additionally, user feedback has shows that the distance of trash from the camera is a critical aspect. The model performs ideally with single pieces of trash in an image less than 1 m away. The model performs less accurately on images when trash which is farther away such as when taken from a vehicle. This is likely due to the training data, TACO dataset, which consists primarily of images of trash close to the camera.
# Availability
-Trash AI is hosted on the web at www.trashai.org. The source code is [available on github](https://github.com/code4sac/trash-ai) with an [MIT license](https://mit-license.org/). The source code can be run offline on any machine that can install [Docker and Docker-compose](www.docker.com). [Documentation](https://github.com/code4sac/trash-ai#ai-for-litter-detection-web-application) is maintained by Code for Sacramento and Open Fresno on Github and will be updated with each release. [Nonexhaustive instructions for AWS deployment](https://github.com/code4sac/trash-ai/blob/manuscript/docs/git-aws-account-setup.md) is available for anyone attempting production level deployment. The image datasets shared to the tool are in an S3 Bucket that needs to be reviewed before being shared with others due to security and moderation concerns but can be acquired by [contacting the repo maintaniers](https://github.com/code4sac/trash-ai/graphs/contributors).
+Trash AI is hosted on the web at www.trashai.org. The source code is [available on github](https://github.com/code4sac/trash-ai) with an [MIT license](https://mit-license.org/). The source code can be run offline on any machine that can install [Docker and Docker-compose](www.docker.com). [Documentation](https://github.com/code4sac/trash-ai#ai-for-litter-detection-web-application) is maintained by Code for Sacramento and Open Fresno on Github and will be updated with each release. [Nonexhaustive instructions for AWS deployment](https://github.com/code4sac/trash-ai/blob/manuscript/docs/git-aws-account-setup.md) is available for anyone attempting production level deployment.
# Future Goals
-This workflow is likely to be highly useful for a wide variety of computer vision applications and we hope that people reuse the code for applications beyond trash detection. We aim to increase the labeling of images by creating a user interface that allows users to improve the annotations that the model is currently predicting by manually restructuring the bounding boxes and relabeling the classes. We aim to work in collaboration with the TACO development team to improve our workflow integration to get the data that people share to our S3 bucket into the [TACO training dataset](http://tacodataset.org/) and trained model. Future models will expand the annotations to include the `Trash Taxonomy` [@Hapich:2022] classes and add an option to choose between other models besides the current model.
+This workflow is likely to be highly useful for a wide variety of computer vision applications and we hope that people reuse the code for applications beyond trash detection. We aim to increase the labeling of images by creating a user interface that allows users to improve the annotations that the model is currently predicting by manually restructuring the bounding boxes and relabeling the classes. We aim to work in collaboration with the TACO development team to improve our workflow integration to get additional data into the [TACO training dataset](http://tacodataset.org/) by creating an option for users to share their data. Future models will expand the annotations to include the `Trash Taxonomy` [@Hapich:2022] classes and add an option to choose between other models besides the current model.
# Acknowledgements
Code for Sacramento and Open Fresno led the development of the software tool. The Moore Institute advised on priorities and led the drafting of this manuscript. Let's Do It Foundation assisted with original products leading up to trash AI in the development of WADE AI. We acknowledge the work of the Code for Sacramento and Open Fresno team, brigades of Code for America, without whom this project would not have been possible, and acknowledge the input of the California Water Monitoring Council Trash Monitoring Workgroup. In particular, we would like to acknowledge Kevin Fries, J.Z. Zhang, Joseph Falkner, Democracy Lab, Brad Anderson, Jim Ewald, Don Brower, and University of Houston. We acknowledge financial support from McPike Zima Charitable Foundation.
From c659948a3cd095b71454b7ba77379a55cc446c2a Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Fri, 26 May 2023 08:37:21 -0700
Subject: [PATCH 58/73] Update paper.md
add Elizabeth to ack
---
paper.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/paper.md b/paper.md
index ff4cf19..19f72e8 100644
--- a/paper.md
+++ b/paper.md
@@ -108,6 +108,6 @@ Trash AI is hosted on the web at www.trashai.org. The source code is [available
This workflow is likely to be highly useful for a wide variety of computer vision applications and we hope that people reuse the code for applications beyond trash detection. We aim to increase the labeling of images by creating a user interface that allows users to improve the annotations that the model is currently predicting by manually restructuring the bounding boxes and relabeling the classes. We aim to work in collaboration with the TACO development team to improve our workflow integration to get additional data into the [TACO training dataset](http://tacodataset.org/) by creating an option for users to share their data. Future models will expand the annotations to include the `Trash Taxonomy` [@Hapich:2022] classes and add an option to choose between other models besides the current model.
# Acknowledgements
-Code for Sacramento and Open Fresno led the development of the software tool. The Moore Institute advised on priorities and led the drafting of this manuscript. Let's Do It Foundation assisted with original products leading up to trash AI in the development of WADE AI. We acknowledge the work of the Code for Sacramento and Open Fresno team, brigades of Code for America, without whom this project would not have been possible, and acknowledge the input of the California Water Monitoring Council Trash Monitoring Workgroup. In particular, we would like to acknowledge Kevin Fries, J.Z. Zhang, Joseph Falkner, Democracy Lab, Brad Anderson, Jim Ewald, Don Brower, and University of Houston. We acknowledge financial support from McPike Zima Charitable Foundation.
+Code for Sacramento and Open Fresno led the development of the software tool. The Moore Institute advised on priorities and led the drafting of this manuscript. Let's Do It Foundation assisted with original products leading up to trash AI in the development of WADE AI. We acknowledge the work of the Code for Sacramento and Open Fresno team, brigades of Code for America, without whom this project would not have been possible, and acknowledge the input of the California Water Monitoring Council Trash Monitoring Workgroup. In particular, we would like to acknowledge Elizabeth Pierotti, Kevin Fries, J.Z. Zhang, Joseph Falkner, Democracy Lab, Brad Anderson, Jim Ewald, Don Brower, and University of Houston. We acknowledge financial support from McPike Zima Charitable Foundation.
# References
From e46d9277bb859fdf222b62a748204ab302d3bb15 Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Fri, 26 May 2023 08:50:31 -0700
Subject: [PATCH 59/73] Update paper.md
add funder possibility lab
---
paper.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/paper.md b/paper.md
index 19f72e8..d073ad5 100644
--- a/paper.md
+++ b/paper.md
@@ -108,6 +108,6 @@ Trash AI is hosted on the web at www.trashai.org. The source code is [available
This workflow is likely to be highly useful for a wide variety of computer vision applications and we hope that people reuse the code for applications beyond trash detection. We aim to increase the labeling of images by creating a user interface that allows users to improve the annotations that the model is currently predicting by manually restructuring the bounding boxes and relabeling the classes. We aim to work in collaboration with the TACO development team to improve our workflow integration to get additional data into the [TACO training dataset](http://tacodataset.org/) by creating an option for users to share their data. Future models will expand the annotations to include the `Trash Taxonomy` [@Hapich:2022] classes and add an option to choose between other models besides the current model.
# Acknowledgements
-Code for Sacramento and Open Fresno led the development of the software tool. The Moore Institute advised on priorities and led the drafting of this manuscript. Let's Do It Foundation assisted with original products leading up to trash AI in the development of WADE AI. We acknowledge the work of the Code for Sacramento and Open Fresno team, brigades of Code for America, without whom this project would not have been possible, and acknowledge the input of the California Water Monitoring Council Trash Monitoring Workgroup. In particular, we would like to acknowledge Elizabeth Pierotti, Kevin Fries, J.Z. Zhang, Joseph Falkner, Democracy Lab, Brad Anderson, Jim Ewald, Don Brower, and University of Houston. We acknowledge financial support from McPike Zima Charitable Foundation.
+Code for Sacramento and Open Fresno led the development of the software tool. The Moore Institute advised on priorities and led the drafting of this manuscript. Let's Do It Foundation assisted with original products leading up to trash AI in the development of WADE AI. We acknowledge the work of the Code for Sacramento and Open Fresno team, brigades of Code for America, without whom this project would not have been possible, and acknowledge the input of the California Water Monitoring Council Trash Monitoring Workgroup. In particular, we would like to acknowledge Elizabeth Pierotti, Kevin Fries, J.Z. Zhang, Joseph Falkner, Democracy Lab, Brad Anderson, Jim Ewald, Don Brower, and University of Houston. We acknowledge financial support from McPike Zima Charitable Foundation and the Possibility Lab.
# References
From ed979f1a283318466fdec29284047955ffbdb1d7 Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Sun, 4 Jun 2023 12:49:40 -0700
Subject: [PATCH 60/73] Update paper.md
add funder.
---
paper.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/paper.md b/paper.md
index d073ad5..2f270b0 100644
--- a/paper.md
+++ b/paper.md
@@ -108,6 +108,6 @@ Trash AI is hosted on the web at www.trashai.org. The source code is [available
This workflow is likely to be highly useful for a wide variety of computer vision applications and we hope that people reuse the code for applications beyond trash detection. We aim to increase the labeling of images by creating a user interface that allows users to improve the annotations that the model is currently predicting by manually restructuring the bounding boxes and relabeling the classes. We aim to work in collaboration with the TACO development team to improve our workflow integration to get additional data into the [TACO training dataset](http://tacodataset.org/) by creating an option for users to share their data. Future models will expand the annotations to include the `Trash Taxonomy` [@Hapich:2022] classes and add an option to choose between other models besides the current model.
# Acknowledgements
-Code for Sacramento and Open Fresno led the development of the software tool. The Moore Institute advised on priorities and led the drafting of this manuscript. Let's Do It Foundation assisted with original products leading up to trash AI in the development of WADE AI. We acknowledge the work of the Code for Sacramento and Open Fresno team, brigades of Code for America, without whom this project would not have been possible, and acknowledge the input of the California Water Monitoring Council Trash Monitoring Workgroup. In particular, we would like to acknowledge Elizabeth Pierotti, Kevin Fries, J.Z. Zhang, Joseph Falkner, Democracy Lab, Brad Anderson, Jim Ewald, Don Brower, and University of Houston. We acknowledge financial support from McPike Zima Charitable Foundation and the Possibility Lab.
+Code for Sacramento and Open Fresno led the development of the software tool. The Moore Institute advised on priorities and led the drafting of this manuscript. Let's Do It Foundation assisted with original products leading up to trash AI in the development of WADE AI. We acknowledge the work of the Code for Sacramento and Open Fresno team, brigades of Code for America, without whom this project would not have been possible, and acknowledge the input of the California Water Monitoring Council Trash Monitoring Workgroup. In particular, we would like to acknowledge Elizabeth Pierotti, Kevin Fries, J.Z. Zhang, Joseph Falkner, Democracy Lab, Brad Anderson, Jim Ewald, Don Brower, and University of Houston. We acknowledge financial support from McPike Zima Charitable Foundation, the National Renewable Energy Laboratory, and the Possibility Lab.
# References
From 0417b28fce52297d1f10d07a7e889f1f94097902 Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Mon, 26 Jun 2023 15:18:08 -0700
Subject: [PATCH 61/73] Update README.md
---
README.md | 3 +++
1 file changed, 3 insertions(+)
diff --git a/README.md b/README.md
index 7732668..248a2d7 100644
--- a/README.md
+++ b/README.md
@@ -34,5 +34,8 @@ You can simply go to www.trashai.org to start using the tool or deploy it yourse
- Runs the complex stuff so you don't have to.
+### Tests
+Instructions for automated and manual tests [here](https://github.com/code4sac/trash-ai/tree/production/frontend/__tests__).
+
## Contribute
We welcome contributions of all kinds. To get started, open an [issue](https://github.com/code4sac/trash-ai/issues) or [pull request](https://github.com/code4sac/trash-ai/pulls). Here are some ideas on [How to Contribute](https://opensource.guide/how-to-contribute/). Please adhere to this project's [Code of Conduct](https://www.contributor-covenant.org/version/2/1/code_of_conduct/).
From 690e4694919cbc7e25f4790fedd4a85dff90e8c1 Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Mon, 26 Jun 2023 17:33:12 -0700
Subject: [PATCH 62/73] add r code for analyzing data
---
notebooks/data_reader/data_reader.R | 55 +++++++++++++++++++++++++++++
1 file changed, 55 insertions(+)
create mode 100644 notebooks/data_reader/data_reader.R
diff --git a/notebooks/data_reader/data_reader.R b/notebooks/data_reader/data_reader.R
new file mode 100644
index 0000000..26ff9e5
--- /dev/null
+++ b/notebooks/data_reader/data_reader.R
@@ -0,0 +1,55 @@
+#Working directory ----
+setwd("notebooks/data_reader") #Change this to your working directory
+
+#Libraries ----
+library(rio)
+library(jsonlite)
+library(ggplot2)
+library(data.table)
+
+#Data import ----
+json_list <- import_list("example_data_download2.zip")
+
+summary_metadata <- names(json_list)[grepl("summary.json", names(json_list))]
+
+image_metadata <- names(json_list)[!grepl("summary.json", names(json_list)) & !grepl("(.jpg)|(.png)|(.tif)|(schema)", names(json_list))][-1]
+
+summary_json <- json_list[[summary_metadata]]
+
+flattened_summary <- data.frame(name = summary_json$detected_objects$name,
+ count = summary_json$detected_objects$count)
+
+
+image_json <- json_list[image_metadata]
+
+flattened_images <- lapply(1:length(image_json), function(i){
+ print(i)
+ data.frame(hash = image_json[[i]]$hash,
+ filename = image_json[[i]]$filename,
+ datetime = if(!is.null(image_json[[i]]$exifdata$DateTimeOriginal)){image_json[[i]]$exifdata$DateTimeOriginal} else{NA},
+ latitude = if(!is.null(image_json[[i]]$exifdata$GPSLatitude)){image_json[[i]]$exifdata$GPSLatitude} else{NA},
+ longitude = if(!is.null(image_json[[i]]$exifdata$GPSLongitude)){image_json[[i]]$exifdata$GPSLongitude} else{NA},
+ score = if(!is.null(image_json[[i]]$metadata$score)){image_json[[i]]$metadata$score} else{NA},
+ label = if(!is.null(image_json[[i]]$metadata$label)){image_json[[i]]$metadata$label} else{NA})
+}) |>
+ rbindlist()
+
+for(i in 1:length(image_json)){
+ print(i)
+ data.frame(hash = image_json[[i]]$hash,
+ filename = image_json[[i]]$filename,
+ datetime = if(length(image_json[[i]]$exifdata) > 0){image_json[[i]]$exifdata$DateTimeOriginal} else{NA},
+ latitude = if(length(image_json[[i]]$exifdata) > 0){image_json[[i]]$exifdata$GPSLatitude} else{NA},
+ longitude = if(length(image_json[[i]]$exifdata) > 0){image_json[[i]]$exifdata$GPSLongitude} else{NA},
+ score = if(length(image_json[[i]]$metadata) > 0){image_json[[i]]$metadata$score} else{NA},
+ label = if(length(image_json[[i]]$metadata) > 0){image_json[[i]]$metadata$label} else{NA})
+}
+
+
+#Figure creation ----
+ggplot(flattened_summary, aes(y = reorder(name, count), x = count, fill = name)) +
+ geom_bar(stat = "identity") +
+ theme_classic(base_size = 15) +
+ theme(legend.position = "none") +
+ labs(x = "Count", y = "Type")
+
From 61f40b900acbcff661712f03cf213124d31909ed Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Mon, 26 Jun 2023 17:34:27 -0700
Subject: [PATCH 63/73] remove unnecessary code.
---
notebooks/data_reader/data_reader.R | 11 -----------
1 file changed, 11 deletions(-)
diff --git a/notebooks/data_reader/data_reader.R b/notebooks/data_reader/data_reader.R
index 26ff9e5..a8f8a8a 100644
--- a/notebooks/data_reader/data_reader.R
+++ b/notebooks/data_reader/data_reader.R
@@ -34,17 +34,6 @@ flattened_images <- lapply(1:length(image_json), function(i){
}) |>
rbindlist()
-for(i in 1:length(image_json)){
- print(i)
- data.frame(hash = image_json[[i]]$hash,
- filename = image_json[[i]]$filename,
- datetime = if(length(image_json[[i]]$exifdata) > 0){image_json[[i]]$exifdata$DateTimeOriginal} else{NA},
- latitude = if(length(image_json[[i]]$exifdata) > 0){image_json[[i]]$exifdata$GPSLatitude} else{NA},
- longitude = if(length(image_json[[i]]$exifdata) > 0){image_json[[i]]$exifdata$GPSLongitude} else{NA},
- score = if(length(image_json[[i]]$metadata) > 0){image_json[[i]]$metadata$score} else{NA},
- label = if(length(image_json[[i]]$metadata) > 0){image_json[[i]]$metadata$label} else{NA})
-}
-
#Figure creation ----
ggplot(flattened_summary, aes(y = reorder(name, count), x = count, fill = name)) +
From 1bde62562117e864affc9cc96c8e605eb42bcea7 Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Mon, 26 Jun 2023 18:22:07 -0700
Subject: [PATCH 64/73] trying to fix the unexpected period issue, not sure
where it is coming from.
---
paper.bib | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/paper.bib b/paper.bib
index 585e7b8..6c34550 100644
--- a/paper.bib
+++ b/paper.bib
@@ -113,7 +113,6 @@ @article{vanLieshout:2020
url = {https://agupubs.onlinelibrary.wiley.com/doi/abs/10.1029/2019EA000960},
eprint = {https://agupubs.onlinelibrary.wiley.com/doi/pdf/10.1029/2019EA000960},
note = {e2019EA000960 10.1029/2019EA000960},
- abstract = {Abstract Quantifying plastic pollution on surface water is essential to understand and mitigate the impact of plastic pollution to the environment. Current monitoring methods such as visual counting are labor intensive. This limits the feasibility of scaling to long-term monitoring at multiple locations. We present an automated method for monitoring plastic pollution that overcomes this limitation. Floating macroplastics are detected from images of the water surface using deep learning. We perform an experimental evaluation of our method using images from bridge-mounted cameras at five different river locations across Jakarta, Indonesia. The four main results of the experimental evaluation are as follows. First, we realize a method that obtains a reliable estimate of plastic density (68.7\% precision). Our monitoring method successfully distinguishes plastics from environmental elements, such as water surface reflection and organic waste. Second, when trained on one location, the method generalizes well to new locations with relatively similar conditions without retraining (≈50\% average precision). Third, generalization to new locations with considerably different conditions can be boosted by retraining on only 50 objects of the new location (improving precision from ≈20\% to ≈42\%). Fourth, our method matches visual counting methods and detects ≈35\% more plastics, even more so during periods of plastic transport rates of above 10 items per meter per minute. Taken together, these results demonstrate that our method is a promising way of monitoring plastic pollution. By extending the variety of the data set the monitoring method can be readily applied at a larger scale.},
year = {2020}
}
@@ -181,8 +180,7 @@ @article{Majchrowska:2022
doi = {10.1016/j.wasman.2021.12.001},
url = {https://www.sciencedirect.com/science/article/pii/S0956053X21006474},
author = {Sylwia Majchrowska and Agnieszka Mikołajczyk and Maria Ferlin and Zuzanna Klawikowska and Marta A. Plantykow and Arkadiusz Kwasigroch and Karol Majek},
-keywords = {Object detection, Semi-supervised learning, Waste classification benchmarks, Waste detection benchmarks, Waste localization, Waste recognition},
-abstract = {Waste pollution is one of the most significant environmental issues in the modern world. The importance of recycling is well known, both for economic and ecological reasons, and the industry demands high efficiency. Current studies towards automatic waste detection are hardly comparable due to the lack of benchmarks and widely accepted standards regarding the used metrics and data. Those problems are addressed in this article by providing a critical analysis of over ten existing waste datasets and a brief but constructive review of the existing Deep Learning-based waste detection approaches. This article collects and summarizes previous studies and provides the results of authors’ experiments on the presented datasets, all intended to create a first replicable baseline for litter detection. Moreover, new benchmark datasets detect-waste and classify-waste are proposed that are merged collections from the above-mentioned open-source datasets with unified annotations covering all possible waste categories: bio, glass, metal and plastic, non-recyclable, other, paper, and unknown. Finally, a two-stage detector for litter localization and classification is presented. EfficientDet-D2 is used to localize litter, and EfficientNet-B2 to classify the detected waste into seven categories. The classifier is trained in a semi-supervised fashion making the use of unlabeled images. The proposed approach achieves up to 70% of average precision in waste detection and around 75% of classification accuracy on the test dataset. The code and annotations used in the studies are publicly available online11https://github.com/wimlds-trojmiasto/detect-waste..}
+keywords = {Object detection, Semi-supervised learning, Waste classification benchmarks, Waste detection benchmarks, Waste localization, Waste recognition}
}
@misc{Proença:2020,
From bb1c099ae3ff87710a39b48fd8dfc764c25e6791 Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Mon, 26 Jun 2023 18:31:18 -0700
Subject: [PATCH 65/73] revert bib to test
---
paper.bib | 14 ++++++--------
1 file changed, 6 insertions(+), 8 deletions(-)
diff --git a/paper.bib b/paper.bib
index 6c34550..71b343e 100644
--- a/paper.bib
+++ b/paper.bib
@@ -88,8 +88,7 @@ @ARTICLE{Hapich:2022
number = 1,
pages = "15",
month = jun,
- year = 2022,
- doi = 10.1186/s43591-022-00035-1
+ year = 2022
}
@misc{Waterboards:2018,
@@ -109,7 +108,7 @@ @article{vanLieshout:2020
number = {8},
pages = {e2019EA000960},
keywords = {plastic pollution, object detection, automated monitoring, deep learning, artificial intelligence, river plastic},
- doi = {10.1029/2019EA000960},
+ doi = {https://doi.org/10.1029/2019EA000960},
url = {https://agupubs.onlinelibrary.wiley.com/doi/abs/10.1029/2019EA000960},
eprint = {https://agupubs.onlinelibrary.wiley.com/doi/pdf/10.1029/2019EA000960},
note = {e2019EA000960 10.1029/2019EA000960},
@@ -145,8 +144,7 @@ @ARTICLE{Lynch:2018
number = 1,
pages = "6",
month = jun,
- year = 2018,
- doi = 10.1186/s40965-018-0050-y
+ year = 2018
}
@@ -177,10 +175,11 @@ @article{Majchrowska:2022
pages = {274-284},
year = {2022},
issn = {0956-053X},
-doi = {10.1016/j.wasman.2021.12.001},
+doi = {https://doi.org/10.1016/j.wasman.2021.12.001},
url = {https://www.sciencedirect.com/science/article/pii/S0956053X21006474},
author = {Sylwia Majchrowska and Agnieszka Mikołajczyk and Maria Ferlin and Zuzanna Klawikowska and Marta A. Plantykow and Arkadiusz Kwasigroch and Karol Majek},
-keywords = {Object detection, Semi-supervised learning, Waste classification benchmarks, Waste detection benchmarks, Waste localization, Waste recognition}
+keywords = {Object detection, Semi-supervised learning, Waste classification benchmarks, Waste detection benchmarks, Waste localization, Waste recognition},
+abstract = {Waste pollution is one of the most significant environmental issues in the modern world. The importance of recycling is well known, both for economic and ecological reasons, and the industry demands high efficiency. Current studies towards automatic waste detection are hardly comparable due to the lack of benchmarks and widely accepted standards regarding the used metrics and data. Those problems are addressed in this article by providing a critical analysis of over ten existing waste datasets and a brief but constructive review of the existing Deep Learning-based waste detection approaches. This article collects and summarizes previous studies and provides the results of authors’ experiments on the presented datasets, all intended to create a first replicable baseline for litter detection. Moreover, new benchmark datasets detect-waste and classify-waste are proposed that are merged collections from the above-mentioned open-source datasets with unified annotations covering all possible waste categories: bio, glass, metal and plastic, non-recyclable, other, paper, and unknown. Finally, a two-stage detector for litter localization and classification is presented. EfficientDet-D2 is used to localize litter, and EfficientNet-B2 to classify the detected waste into seven categories. The classifier is trained in a semi-supervised fashion making the use of unlabeled images. The proposed approach achieves up to 70% of average precision in waste detection and around 75% of classification accuracy on the test dataset. The code and annotations used in the studies are publicly available online11https://github.com/wimlds-trojmiasto/detect-waste..}
}
@misc{Proença:2020,
@@ -193,4 +192,3 @@ @misc{Proença:2020
year = {2020},
copyright = {arXiv.org perpetual, non-exclusive license}
}
-
From 9c0863c291ff9f3ec1e4c00bdcac100fe46437a8 Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Mon, 26 Jun 2023 18:37:28 -0700
Subject: [PATCH 66/73] add dois
---
paper.bib | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/paper.bib b/paper.bib
index 71b343e..36defc9 100644
--- a/paper.bib
+++ b/paper.bib
@@ -88,7 +88,8 @@ @ARTICLE{Hapich:2022
number = 1,
pages = "15",
month = jun,
- year = 2022
+ year = 2022,
+ doi = "10.1186/s43591-022-00035-1"
}
@misc{Waterboards:2018,
@@ -108,7 +109,7 @@ @article{vanLieshout:2020
number = {8},
pages = {e2019EA000960},
keywords = {plastic pollution, object detection, automated monitoring, deep learning, artificial intelligence, river plastic},
- doi = {https://doi.org/10.1029/2019EA000960},
+ doi = {10.1029/2019EA000960},
url = {https://agupubs.onlinelibrary.wiley.com/doi/abs/10.1029/2019EA000960},
eprint = {https://agupubs.onlinelibrary.wiley.com/doi/pdf/10.1029/2019EA000960},
note = {e2019EA000960 10.1029/2019EA000960},
@@ -144,7 +145,8 @@ @ARTICLE{Lynch:2018
number = 1,
pages = "6",
month = jun,
- year = 2018
+ year = 2018,
+ doi = "10.1186/s40965-018-0050-y"
}
@@ -175,7 +177,7 @@ @article{Majchrowska:2022
pages = {274-284},
year = {2022},
issn = {0956-053X},
-doi = {https://doi.org/10.1016/j.wasman.2021.12.001},
+doi = {10.1016/j.wasman.2021.12.001},
url = {https://www.sciencedirect.com/science/article/pii/S0956053X21006474},
author = {Sylwia Majchrowska and Agnieszka Mikołajczyk and Maria Ferlin and Zuzanna Klawikowska and Marta A. Plantykow and Arkadiusz Kwasigroch and Karol Majek},
keywords = {Object detection, Semi-supervised learning, Waste classification benchmarks, Waste detection benchmarks, Waste localization, Waste recognition},
From 03b50dd8bbe78faad95223e19106a33c80b2a9fd Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Tue, 27 Jun 2023 08:53:33 -0700
Subject: [PATCH 67/73] update paper acknowledgments and links.
---
paper.md | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/paper.md b/paper.md
index 2f270b0..87afa48 100644
--- a/paper.md
+++ b/paper.md
@@ -52,7 +52,7 @@ bibliography: paper.bib
Although computer vision classification routines have been created for trash, they have not been accessible to most researchers due to the challenges in deploying the models. Trash AI is a web GUI (Graphical User Interface) for serverless computer vision classification of individual items of trash within images, hosted at www.trashai.org. With a single batch upload and download, a user can automatically describe the types and quantities of trash in all of their images.
# Statement of need
-Trash in the environment is a widespread problem that is difficult to measure. Policy makers require high quality data on trash to create effective policies. Classical measurement techniques require surveyors with pen and paper to manually quantify every piece of trash at a site. This method is time-consuming. Scientists are actively trying to address this issue by using imaging to better understand the prevalence and distribution of trash in an `efficient yet effective manner` [@Majchrowska:2022; @Proença:2020; @Moore:2020; @vanLieshout:2020; @WADEAI:2020; @Lynch:2018; @Wuu:2018; @Waterboards:2018]. An app-based reporting of trash using cell phones, laptops, and other devices has been a `valuable solution` [@Lynch:2018]. Applications for AI in detecting trash currently include: images from `bridges` [@vanLieshout:2020], `drone imaging` [@Moore:2020], cameras on `street sweepers` [@Waterboards:2018], and cell phone app based reporting of `trash` [@Lynch:2018]. Although there are many artificial intelligence algorithms developed for trash classification, none are readily accessible to the average litter researcher. The primary limitation is that artificial intelligence (AI) algorithms are primarily run through programming languages (not graphic user interfaces), difficult to deploy without AI expertise, and often live on a server (which costs money to host). New developments in browser-side AI (e.g., tensorflow.js) and serverless architecture (e.g., AWS Lambda) have created the opportunity to have affordable browser-side artificial intelligence in a web GUI, alleviating both obstacles. We present Trash AI, an open source service for making computer vision available to anyone with a web browser and images of trash.
+Trash in the environment is a widespread problem that is difficult to measure. Policy makers require high quality data on trash to create effective policies. Classical measurement techniques require surveyors with pen and paper to manually quantify every piece of trash at a site. This method is time-consuming. Scientists are actively trying to address this issue by using imaging to better understand the prevalence and distribution of trash in an `efficient yet effective manner` [@Majchrowska:2022; @Proença:2020; @Moore:2020; @vanLieshout:2020; @WADEAI:2020; @Lynch:2018; @Wuu:2018; @Waterboards:2018]. Image-based reporting of trash using cell phones, laptops, and other devices has been a `valuable solution` [@Lynch:2018]. Applications for AI in detecting trash using imagery currently include: cameras mounted on `bridges` [@vanLieshout:2020], `drone imaging` [@Moore:2020], cameras on `street sweepers` [@Waterboards:2018], and cell phone app based reporting of `trash` [@Lynch:2018]. Although there are many artificial intelligence algorithms developed for trash classification, none are readily accessible to the average litter researcher. The primary limitation is that artificial intelligence (AI) algorithms are primarily run through programming languages (not graphic user interfaces), difficult to deploy without AI expertise, and often live on a server (which costs money to host). New developments in browser-side AI (e.g., tensorflow.js) and serverless architecture (e.g., AWS Lambda) have created the opportunity to have affordable browser-side artificial intelligence in a web GUI, alleviating both obstacles. We present Trash AI, an open source service for making computer vision available to anyone with a web browser and images of trash.
# Demo
We have a full video tutorial on [Youtube](https://youtu.be/HHrjUpQynUM)
@@ -72,7 +72,7 @@ We have a full video tutorial on [Youtube](https://youtu.be/HHrjUpQynUM)
### 4.
-![View results mapped if your images have location stamp.\label{fig:example4}](https://user-images.githubusercontent.com/26821843/188520745-65ef3270-6093-488a-b501-305ecb436bc1.png)
+![View results mapped if the images have location stamp.\label{fig:example4}](https://user-images.githubusercontent.com/26821843/188520745-65ef3270-6093-488a-b501-305ecb436bc1.png)
### 5.
@@ -93,21 +93,21 @@ We have a full video tutorial on [Youtube](https://youtu.be/HHrjUpQynUM)
# Method
## Workflow Overview
-Trash AI is trained on the [TACO dataset](http://tacodataset.org/) using [YOLO 5](pytorch.org). Trash AI stores images in [IndexDB](https://developer.mozilla.org/en-US/docs/Web/API/IndexedDB_API) to keep the data primarily browser side and uses [tensorflow.js](https://www.tensorflow.org/js) to keep analysis browser side too. When images are uploaded to the browser, Trash AI provides the prediction of the model as a graphical output. The raw data from the model and labeled images can be downloaded in a batch download to expedite analyses.
+Trash AI is trained on the [TACO dataset](http://tacodataset.org/) using [YOLO 5](https://pytorch.org/). Trash AI stores images in [IndexDB](https://developer.mozilla.org/en-US/docs/Web/API/IndexedDB_API) to keep the data primarily browser side and uses [tensorflow.js](https://www.tensorflow.org/js) to keep analysis browser side too. When images are uploaded to the browser, Trash AI provides the prediction of the model as a graphical output. The raw data from the model and labeled images can be downloaded in batch to expedite analyses.
## AI Training
The AI model was developed starting with the TACO dataset, which was available with a complimentary Jupyter Notebook on [Kaggle](https://www.kaggle.com/datasets/kneroma/tacotrashdataset). An example notebook was referenced, which used the default `YOLO v5 model` [@Jocher:2020] as the basic model to begin transfer learning. Next, transfer learning was completed using the entire TACO dataset to import the image classes and annotations in the YOLO v5 model.
## Limitations
-From our experience, the accuracy of the model varies depending on the quality of the images and their context/background. "Trash" is a word people use for an object that lacks purpose, and the purpose of an object is often not obvious in an image. Trash is a nuanced classification because the same object in different settings will not be considered trash (e.g., a drink bottle on someone's desk vs in the forest laying on the ground). This is the main challenge with any image-based trash detection algorithm. Not everything that LOOKS like trash IS trash. This and other complexities to trash classification make a general trash AI a challenging (yet worthwhile) long-term endeavor. The algorithm is primarily trained on single pieces of trash in the image, with the trash laying on the ground. Thus, model class prediction of trash in these kinds of images will generally be better than trash appearing in aerial images, for example. Additionally, user feedback has shows that the distance of trash from the camera is a critical aspect. The model performs ideally with single pieces of trash in an image less than 1 m away. The model performs less accurately on images when trash which is farther away such as when taken from a vehicle. This is likely due to the training data, TACO dataset, which consists primarily of images of trash close to the camera.
+From our experience, the accuracy of the model varies depending on the quality of the images and their context/background. "Trash" is a word people use for an object that lacks purpose, and the purpose of an object is often not obvious in an image. Trash is a nuanced classification because the same object in different settings will not be considered trash (e.g., a drink bottle on someone's desk vs in the forest lying on the ground). This is the main challenge with any image-based trash detection algorithm. Not everything that LOOKS like trash IS trash. This and other complexities to trash classification make a general trash AI a challenging (yet worthwhile) long-term endeavor. The algorithm is primarily trained on the TACO dataset, which is composed of images of single pieces of trash, with the trash lying on the ground (< 1 m away). Thus, model class prediction of trash in these kinds of images will generally be better than trash appearing in aerial images or imaged from a vehicle, for example.
# Availability
-Trash AI is hosted on the web at www.trashai.org. The source code is [available on github](https://github.com/code4sac/trash-ai) with an [MIT license](https://mit-license.org/). The source code can be run offline on any machine that can install [Docker and Docker-compose](www.docker.com). [Documentation](https://github.com/code4sac/trash-ai#ai-for-litter-detection-web-application) is maintained by Code for Sacramento and Open Fresno on Github and will be updated with each release. [Nonexhaustive instructions for AWS deployment](https://github.com/code4sac/trash-ai/blob/manuscript/docs/git-aws-account-setup.md) is available for anyone attempting production level deployment.
+Trash AI is hosted on the web at www.trashai.org. The source code is [available on github](https://github.com/code4sac/trash-ai) with an [MIT license](https://mit-license.org/). The source code can be run offline on any machine that can install [Docker and Docker-compose](www.docker.com). [Documentation](https://github.com/code4sac/trash-ai#trash-ai-web-application-for-serverless-image-classification-of-trash) is maintained by Code for Sacramento and Open Fresno on Github and will be updated with each release. [Nonexhaustive instructions for AWS deployment](https://github.com/code4sac/trash-ai/blob/manuscript/docs/git-aws-account-setup.md) is available for anyone attempting production level deployment.
# Future Goals
This workflow is likely to be highly useful for a wide variety of computer vision applications and we hope that people reuse the code for applications beyond trash detection. We aim to increase the labeling of images by creating a user interface that allows users to improve the annotations that the model is currently predicting by manually restructuring the bounding boxes and relabeling the classes. We aim to work in collaboration with the TACO development team to improve our workflow integration to get additional data into the [TACO training dataset](http://tacodataset.org/) by creating an option for users to share their data. Future models will expand the annotations to include the `Trash Taxonomy` [@Hapich:2022] classes and add an option to choose between other models besides the current model.
# Acknowledgements
-Code for Sacramento and Open Fresno led the development of the software tool. The Moore Institute advised on priorities and led the drafting of this manuscript. Let's Do It Foundation assisted with original products leading up to trash AI in the development of WADE AI. We acknowledge the work of the Code for Sacramento and Open Fresno team, brigades of Code for America, without whom this project would not have been possible, and acknowledge the input of the California Water Monitoring Council Trash Monitoring Workgroup. In particular, we would like to acknowledge Elizabeth Pierotti, Kevin Fries, J.Z. Zhang, Joseph Falkner, Democracy Lab, Brad Anderson, Jim Ewald, Don Brower, and University of Houston. We acknowledge financial support from McPike Zima Charitable Foundation, the National Renewable Energy Laboratory, and the Possibility Lab.
+Code for Sacramento and Open Fresno led the development of the software tool. The Moore Institute for Plastic Pollution Research advised on priorities and led the drafting of this manuscript. Let's Do It Foundation assisted with original products leading up to trash AI in the development of WADE AI. We acknowledge the work of the Code for Sacramento and Open Fresno team, brigades of Code for America, without whom this project would not have been possible, and acknowledge the input of the California Water Monitoring Council Trash Monitoring Workgroup. In particular, we would like to acknowledge Gary Conley, Tony Hale, Emin Israfil, Tom Novotny, Margaret McCauley, Julian Fulton, Janna Taing, Elizabeth Pierotti, Kevin Fries, J.Z. Zhang, Joseph Falkner, Democracy Lab, Brad Anderson, Jim Ewald, Don Brower, and University of Houston. We acknowledge financial support from McPike Zima Charitable Foundation, the National Renewable Energy Laboratory, and the Possibility Lab.
# References
From 4d2f090ffde7d12cfae0a1296317857058c82b4f Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Tue, 27 Jun 2023 11:33:54 -0700
Subject: [PATCH 68/73] add comments.
---
notebooks/data_reader/data_reader.R | 17 ++++++++++++-----
1 file changed, 12 insertions(+), 5 deletions(-)
diff --git a/notebooks/data_reader/data_reader.R b/notebooks/data_reader/data_reader.R
index a8f8a8a..219b0c6 100644
--- a/notebooks/data_reader/data_reader.R
+++ b/notebooks/data_reader/data_reader.R
@@ -7,21 +7,25 @@ library(jsonlite)
library(ggplot2)
library(data.table)
-#Data import ----
+# Data import ----
json_list <- import_list("example_data_download2.zip")
+# Get path of the summary table.
summary_metadata <- names(json_list)[grepl("summary.json", names(json_list))]
-image_metadata <- names(json_list)[!grepl("summary.json", names(json_list)) & !grepl("(.jpg)|(.png)|(.tif)|(schema)", names(json_list))][-1]
+# Get path of the image metadata.
+image_metadata <- names(json_list)[!grepl("(.jpg)|(.png)|(.tif)|(schema)|(summary)", names(json_list))][-1]
+# Filter the summary data.
summary_json <- json_list[[summary_metadata]]
+# Flatten the summary data.
flattened_summary <- data.frame(name = summary_json$detected_objects$name,
count = summary_json$detected_objects$count)
-
-
+# Filter the image data.
image_json <- json_list[image_metadata]
+# Flatten the image data.
flattened_images <- lapply(1:length(image_json), function(i){
print(i)
data.frame(hash = image_json[[i]]$hash,
@@ -34,8 +38,11 @@ flattened_images <- lapply(1:length(image_json), function(i){
}) |>
rbindlist()
+# Test equivalence in counts.
+nrow(flattened_images[!is.na(flattened_images$label),]) == sum(flattened_summary$count)
+
-#Figure creation ----
+# Figure creation ----
ggplot(flattened_summary, aes(y = reorder(name, count), x = count, fill = name)) +
geom_bar(stat = "identity") +
theme_classic(base_size = 15) +
From f7faa353fa8676713d1ecbb2b07e07092c76687f Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Tue, 27 Jun 2023 12:16:42 -0700
Subject: [PATCH 69/73] remove empty line
---
notebooks/data_reader/data_reader.R | 1 -
1 file changed, 1 deletion(-)
diff --git a/notebooks/data_reader/data_reader.R b/notebooks/data_reader/data_reader.R
index 219b0c6..a8855ba 100644
--- a/notebooks/data_reader/data_reader.R
+++ b/notebooks/data_reader/data_reader.R
@@ -41,7 +41,6 @@ flattened_images <- lapply(1:length(image_json), function(i){
# Test equivalence in counts.
nrow(flattened_images[!is.na(flattened_images$label),]) == sum(flattened_summary$count)
-
# Figure creation ----
ggplot(flattened_summary, aes(y = reorder(name, count), x = count, fill = name)) +
geom_bar(stat = "identity") +
From 132b99772d591442f174d1e40366056141c07d30 Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Tue, 27 Jun 2023 14:06:33 -0700
Subject: [PATCH 70/73] Update about.vue
update about
---
frontend/src/views/about.vue | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/frontend/src/views/about.vue b/frontend/src/views/about.vue
index 91264f2..f621c35 100644
--- a/frontend/src/views/about.vue
+++ b/frontend/src/views/about.vue
@@ -54,9 +54,8 @@
Disclaimer about uploaded images
The current version of Trash AI and the model we are using is just a
- start! When you upload an image, we are storing the image and the
- classification in an effort to expand the trash dataset and improve
- the model over time.
+ start! The tool works best for images of individual pieces of trash imaged less than 1 meter away from the camera.
+ We are looking for collaborators who can help us improve this project.
Reporting issues and improvements
From ece837a08e3495d521db4ed905f6a7da496022f1 Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Tue, 27 Jun 2023 14:40:56 -0700
Subject: [PATCH 71/73] Update about.vue
add tutorial
---
frontend/src/views/about.vue | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/frontend/src/views/about.vue b/frontend/src/views/about.vue
index f621c35..226b672 100644
--- a/frontend/src/views/about.vue
+++ b/frontend/src/views/about.vue
@@ -23,6 +23,10 @@
To get started, visit the Upload Tab or
click here.
+
Tutorial
+
+
+
What is it?
From 84f1cc0bb874a6b6db7b08fe6d43ab620affbf12 Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Tue, 27 Jun 2023 14:43:16 -0700
Subject: [PATCH 72/73] Update README.md
update video
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index 666187b..50dfdfe 100644
--- a/README.md
+++ b/README.md
@@ -15,7 +15,7 @@ Trash AI is a web application where users can upload photos of litter, which wil
#### Demo
-[![image](https://user-images.githubusercontent.com/26821843/188515526-33e1196b-6830-4187-8fe4-e68b2bd4019e.png)](https://youtu.be/HHrjUpQynUM)
+[![image](https://user-images.githubusercontent.com/26821843/188515526-33e1196b-6830-4187-8fe4-e68b2bd4019e.png)](https://youtu.be/u0DxGrbPOC0)
## Deployment
From 4a9caf12aee3c54b28da01264c2d0fda5ed05f4b Mon Sep 17 00:00:00 2001
From: "Win Cowger, PhD"
Date: Tue, 27 Jun 2023 14:43:48 -0700
Subject: [PATCH 73/73] Update paper.md
update video
---
paper.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/paper.md b/paper.md
index 87afa48..da70152 100644
--- a/paper.md
+++ b/paper.md
@@ -55,7 +55,7 @@ Although computer vision classification routines have been created for trash, th
Trash in the environment is a widespread problem that is difficult to measure. Policy makers require high quality data on trash to create effective policies. Classical measurement techniques require surveyors with pen and paper to manually quantify every piece of trash at a site. This method is time-consuming. Scientists are actively trying to address this issue by using imaging to better understand the prevalence and distribution of trash in an `efficient yet effective manner` [@Majchrowska:2022; @Proença:2020; @Moore:2020; @vanLieshout:2020; @WADEAI:2020; @Lynch:2018; @Wuu:2018; @Waterboards:2018]. Image-based reporting of trash using cell phones, laptops, and other devices has been a `valuable solution` [@Lynch:2018]. Applications for AI in detecting trash using imagery currently include: cameras mounted on `bridges` [@vanLieshout:2020], `drone imaging` [@Moore:2020], cameras on `street sweepers` [@Waterboards:2018], and cell phone app based reporting of `trash` [@Lynch:2018]. Although there are many artificial intelligence algorithms developed for trash classification, none are readily accessible to the average litter researcher. The primary limitation is that artificial intelligence (AI) algorithms are primarily run through programming languages (not graphic user interfaces), difficult to deploy without AI expertise, and often live on a server (which costs money to host). New developments in browser-side AI (e.g., tensorflow.js) and serverless architecture (e.g., AWS Lambda) have created the opportunity to have affordable browser-side artificial intelligence in a web GUI, alleviating both obstacles. We present Trash AI, an open source service for making computer vision available to anyone with a web browser and images of trash.
# Demo
-We have a full video tutorial on [Youtube](https://youtu.be/HHrjUpQynUM)
+We have a full video tutorial on [Youtube](https://youtu.be/u0DxGrbPOC0)
## Basic workflow:
### 1.